| Column | Declared type | Lengths / values |
|---|---|---|
| hexsha | stringlengths | 40 to 40 |
| size | int64 | 4 to 1.02M |
| ext | stringclasses | 8 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 4 to 209 |
| max_stars_repo_name | stringlengths | 5 to 121 |
| max_stars_repo_head_hexsha | stringlengths | 40 to 40 |
| max_stars_repo_licenses | listlengths | 1 to 10 |
| max_stars_count | int64 | 1 to 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 to 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 to 24 ⌀ |
| max_issues_repo_path | stringlengths | 4 to 209 |
| max_issues_repo_name | stringlengths | 5 to 121 |
| max_issues_repo_head_hexsha | stringlengths | 40 to 40 |
| max_issues_repo_licenses | listlengths | 1 to 10 |
| max_issues_count | int64 | 1 to 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 to 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 to 24 ⌀ |
| max_forks_repo_path | stringlengths | 4 to 209 |
| max_forks_repo_name | stringlengths | 5 to 121 |
| max_forks_repo_head_hexsha | stringlengths | 40 to 40 |
| max_forks_repo_licenses | listlengths | 1 to 10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 to 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 to 24 ⌀ |
| content | stringlengths | 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
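Each preview row below lists these fields for one source file, followed by the file's full content and the three derived statistics (avg_line_length, max_line_length, alphanum_fraction). As a rough, non-authoritative illustration of how rows with this schema could be consumed, here is a minimal sketch that filters rows exported as JSON Lines; the file name `rows.jsonl` and the threshold values are hypothetical and are not taken from this dump.

```python
# Minimal sketch: read rows that follow the schema above from a JSON Lines
# export and keep only the "clean" Python files. "rows.jsonl" and the
# thresholds are hypothetical examples, not values from this preview.
import json

def iter_rows(path):
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            yield json.loads(line)

kept = [
    row for row in iter_rows("rows.jsonl")
    if row["ext"] == "py"                         # Python sources only
    and row["max_line_length"] < 200              # skip minified/generated files
    and 0.25 <= row["alphanum_fraction"] <= 0.90
]
print(f"kept {len(kept)} rows")
```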
a4590b8ade146a3172823cb334cb4ab14d8475d4
| 2,156
|
py
|
Python
|
vspk/v6/fetchers/nuredundancygroups_fetcher.py
|
axxyhtrx/vspk-python
|
4495882c6bcbb1ef51b14b9f4dc7efe46476ff50
|
[
"BSD-3-Clause"
] | 19
|
2016-03-07T12:34:22.000Z
|
2020-06-11T11:09:02.000Z
|
vspk/v6/fetchers/nuredundancygroups_fetcher.py
|
axxyhtrx/vspk-python
|
4495882c6bcbb1ef51b14b9f4dc7efe46476ff50
|
[
"BSD-3-Clause"
] | 40
|
2016-06-13T15:36:54.000Z
|
2020-11-10T18:14:43.000Z
|
vspk/v6/fetchers/nuredundancygroups_fetcher.py
|
axxyhtrx/vspk-python
|
4495882c6bcbb1ef51b14b9f4dc7efe46476ff50
|
[
"BSD-3-Clause"
] | 15
|
2016-06-10T22:06:01.000Z
|
2020-12-15T18:37:42.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from bambou import NURESTFetcher
class NURedundancyGroupsFetcher(NURESTFetcher):
""" Represents a NURedundancyGroups fetcher
Notes:
This fetcher enables fetching NURedundancyGroup objects.
See:
bambou.NURESTFetcher
"""
@classmethod
def managed_class(cls):
""" Return NURedundancyGroup class that is managed.
Returns:
.NURedundancyGroup: the managed class
"""
from .. import NURedundancyGroup
return NURedundancyGroup
| 40.679245
| 86
| 0.731447
|
0855d5ac1e109e95464507e215c8de1992bce1b0
| 3,763
|
py
|
Python
|
fHDHR/device/tuners/tuner.py
|
deathbybandaid/fHDHR_NewsOn
|
06d205a3ca677b88fa93b9b7503465aed1838c6b
|
[
"WTFPL"
] | 2
|
2021-11-21T18:45:35.000Z
|
2022-01-11T16:11:48.000Z
|
fHDHR/device/tuners/tuner.py
|
deathbybandaid/fHDHR_NewsOn
|
06d205a3ca677b88fa93b9b7503465aed1838c6b
|
[
"WTFPL"
] | null | null | null |
fHDHR/device/tuners/tuner.py
|
deathbybandaid/fHDHR_NewsOn
|
06d205a3ca677b88fa93b9b7503465aed1838c6b
|
[
"WTFPL"
] | null | null | null |
import threading
import datetime
from fHDHR.exceptions import TunerError
from fHDHR.tools import humanized_time
from .stream import Stream
class Tuner():
def __init__(self, fhdhr, inum, epg):
self.fhdhr = fhdhr
self.number = inum
self.epg = epg
self.tuner_lock = threading.Lock()
self.set_off_status()
self.chanscan_url = "%s/api/channels?method=scan"
self.close_url = "/api/tuners?method=close&tuner=%s" % str(self.number)
def channel_scan(self, grabbed=False):
if self.tuner_lock.locked() and not grabbed:
self.fhdhr.logger.error("Tuner #%s is not available." % str(self.number))
raise TunerError("804 - Tuner In Use")
if self.status["status"] == "Scanning":
self.fhdhr.logger.info("Channel Scan Already In Progress!")
else:
if not grabbed:
self.tuner_lock.acquire()
self.status["status"] = "Scanning"
self.fhdhr.logger.info("Tuner #%s Performing Channel Scan." % str(self.number))
chanscan = threading.Thread(target=self.runscan)
chanscan.start()
def runscan(self):
self.fhdhr.api.get(self.chanscan_url)
self.fhdhr.logger.info("Requested Channel Scan Complete.")
self.close()
self.fhdhr.api.get(self.close_url)
def add_downloaded_size(self, bytes_count):
if "downloaded" in list(self.status.keys()):
self.status["downloaded"] += bytes_count
def grab(self, channel_number):
if self.tuner_lock.locked():
self.fhdhr.logger.error("Tuner #" + str(self.number) + " is not available.")
raise TunerError("804 - Tuner In Use")
self.tuner_lock.acquire()
self.status["status"] = "Acquired"
self.status["channel"] = channel_number
self.fhdhr.logger.info("Tuner #%s Acquired." % str(self.number))
def close(self):
self.set_off_status()
if self.tuner_lock.locked():
self.tuner_lock.release()
self.fhdhr.logger.info("Tuner #" + str(self.number) + " Released.")
def get_status(self):
current_status = self.status.copy()
if current_status["status"] == "Active":
current_status["Play Time"] = str(
humanized_time(
int((datetime.datetime.utcnow() - current_status["time_start"]).total_seconds())))
current_status["time_start"] = str(current_status["time_start"])
current_status["epg"] = self.epg.whats_on_now(current_status["channel"])
return current_status
def set_off_status(self):
self.status = {"status": "Inactive"}
def get_stream(self, stream_args, tuner):
stream = Stream(self.fhdhr, stream_args, tuner)
return stream.get()
def set_status(self, stream_args):
if self.status["status"] != "Active":
self.status = {
"status": "Active",
"clients": [],
"clients_id": [],
"method": stream_args["method"],
"accessed": [stream_args["accessed"]],
"channel": stream_args["channel"],
"proxied_url": stream_args["channelUri"],
"time_start": datetime.datetime.utcnow(),
"downloaded": 0
}
if stream_args["client"] not in self.status["clients"]:
self.status["clients"].append(stream_args["client"])
if stream_args["client_id"] not in self.status["clients_id"]:
self.status["clients_id"].append(stream_args["client_id"])
| 38.010101
| 102
| 0.575605
|
375bf492e8e99ca5c8ace7c2685d72abbd9f5518
| 1,444
|
py
|
Python
|
autogram/pictures/management/commands/scrapmedia.py
|
ohduran/autogram
|
e24c7ff40c44cd0eabf8018e61ad5fe0b422a6a1
|
[
"MIT"
] | null | null | null |
autogram/pictures/management/commands/scrapmedia.py
|
ohduran/autogram
|
e24c7ff40c44cd0eabf8018e61ad5fe0b422a6a1
|
[
"MIT"
] | null | null | null |
autogram/pictures/management/commands/scrapmedia.py
|
ohduran/autogram
|
e24c7ff40c44cd0eabf8018e61ad5fe0b422a6a1
|
[
"MIT"
] | null | null | null |
import subprocess
from django.contrib.auth import get_user_model
from django.core.management import call_command
from django.core.management.base import BaseCommand
User = get_user_model()
class Command(BaseCommand):
help = 'Pulls media from instagram into the scraped_media folder'
requires_migrations_checks = True
def add_arguments(self, parser):
"""python manage.py scrapmedia demo_ig_scraper"""
parser.add_argument('owners', nargs='+', type=str)
def handle(self, *args, **options):
"""
instagram-scraper -u settings.BOT_USERNAME -p settings.BOT_PASSWORD sightsofbcn -d scraped_media --media-metadata
And then, call_command loadscrapedmedia on the resulting file.
"""
command_template = 'instagram-scraper -u {} -p {} {} -d scraped_media --media-metadata'
for owner in options['owners']:
bot = User.objects.first()
bash_command = command_template.format(bot.username, bot.para, owner)
self.stdout.write(self.style.WARNING(f'Now running {bash_command}'))
process = subprocess.Popen(bash_command.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
if not error:
self.stdout.write(self.style.WARNING('Loading scraped media from scraped_media/{}.json'.format(owner)))
call_command('loadmedia', 'scraped_media/{}.json'.format(owner))
| 41.257143
| 121
| 0.682133
|
498f6c43452c82854226b265a24fc1c03e2bb2aa
| 1,049
|
py
|
Python
|
test/errortest.py
|
AntonYamkovoy/convolutional_NN
|
2d5aab86880c11f74abd7671083f44eee8a36458
|
[
"MIT"
] | 1
|
2020-02-25T15:21:15.000Z
|
2020-02-25T15:21:15.000Z
|
test/errortest.py
|
AntonYamkovoy/convolutional_NN
|
2d5aab86880c11f74abd7671083f44eee8a36458
|
[
"MIT"
] | null | null | null |
test/errortest.py
|
AntonYamkovoy/convolutional_NN
|
2d5aab86880c11f74abd7671083f44eee8a36458
|
[
"MIT"
] | null | null | null |
import numpy as np
import numpy.linalg as la
import scipy.signal as scisig
from func import *
B23 = np.asarray([
[1, 0,-1, 0],
[0, 1, 1, 0],
[0,-1, 1, 0],
[0, 1, 0,-1]
]).T
G23 = np.asarray([
[ 1, 0, 0],
[.5, .5,.5],
[.5,-.5,.5],
[ 0, 0, 1]
])
A23 = np.asarray([
[1,1,1,0],
[0,1,-1,-1]
]).T
g = np.random.random((4,4))
f = np.random.random((3,3))
direct = np.zeros((2,2))
for i in range(2):
for j in range(2):
direct[i,j] = np.sum(f * g[i:i+3,j:j+3])
inner = np.dot(G23, np.dot(f, G23.T)) * np.dot(B23.T, np.dot(g, B23))
Y = np.dot(A23.T, np.dot(inner, A23))
print("Error of one Winograd",la.norm(Y - direct)/la.norm(direct))
convLib = scisig.convolve2d(f,g)
conv2d = convolve2DToeplitz(f,g)
g2 = revMatrix(g)
g2 = padImage(g2,len(f))
cWino = simpleWinogradAlg(f,g2,2,B23,G23,A23)[0]
cWino = revMatrix(cWino)
print(convLib)
print(cWino)
print("Error:",la.norm(convLib - conv2d, ord=2)/la.norm(convLib, ord=2))
print("Error:",la.norm(convLib - cWino, ord=2)/la.norm(convLib, ord=2))
| 19.792453
| 72
| 0.581506
|
2e2855c8ccdec04b5000aeff9a7a56c901c44440
| 6,381
|
py
|
Python
|
userbot/plugins/afk.py
|
midhunkm1294-bit/TeleBot
|
b4309fb662e834d9d3826172b69fd07d42ef83a2
|
[
"MIT"
] | null | null | null |
userbot/plugins/afk.py
|
midhunkm1294-bit/TeleBot
|
b4309fb662e834d9d3826172b69fd07d42ef83a2
|
[
"MIT"
] | null | null | null |
userbot/plugins/afk.py
|
midhunkm1294-bit/TeleBot
|
b4309fb662e834d9d3826172b69fd07d42ef83a2
|
[
"MIT"
] | null | null | null |
"""AFK Plugin for @UniBorg
Syntax: .afk REASON"""
import asyncio
import datetime
from datetime import datetime
from telethon import events
from telethon.tl import functions, types
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global last_afk_message # pylint:disable=E0602
global afk_start
global afk_end
USER_AFK = {}
afk_time = None
last_afk_message = {}
afk_start = {}
@borg.on(events.NewMessage(pattern=r"\.afk ?(.*)", outgoing=True)) # pylint:disable=E0602
async def _(event):
if event.fwd_from:
return
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global last_afk_message # pylint:disable=E0602
global afk_start
global afk_end
global reason
USER_AFK = {}
afk_time = None
last_afk_message = {}
afk_end = {}
start_1 = datetime.now()
afk_start = start_1.replace(microsecond=0)
reason = event.pattern_match.group(1)
if not USER_AFK: # pylint:disable=E0602
last_seen_status = await borg( # pylint:disable=E0602
functions.account.GetPrivacyRequest(
types.InputPrivacyKeyStatusTimestamp()
)
)
if isinstance(last_seen_status.rules, types.PrivacyValueAllowAll):
afk_time = datetime.datetime.now() # pylint:disable=E0602
USER_AFK = f"yes: {reason}" # pylint:disable=E0602
if reason:
await borg.send_message(event.chat_id, f"**I shall be Going afk!** __because ~ {reason}__")
else:
await borg.send_message(event.chat_id, f"**I am Going afk!**")
await asyncio.sleep(5)
await event.delete()
try:
await borg.send_message( # pylint:disable=E0602
Config.PRIVATE_GROUP_BOT_API_ID, # pylint:disable=E0602
f"Set AFK mode to True, and Reason is {reason}"
)
except Exception as e: # pylint:disable=C0103,W0703
logger.warn(str(e)) # pylint:disable=E0602
@borg.on(events.NewMessage(outgoing=True)) # pylint:disable=E0602
async def set_not_afk(event):
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global last_afk_message # pylint:disable=E0602
global afk_start
global afk_end
back_alive = datetime.now()
afk_end = back_alive.replace(microsecond=0)
if afk_start != {}:
total_afk_time = str((afk_end - afk_start))
current_message = event.message.message
if ".afk" not in current_message and "yes" in USER_AFK: # pylint:disable=E0602
shite = await borg.send_message(event.chat_id, "__Back alive!__\n**No Longer afk.**\n `Was afk for:``" + total_afk_time + "`")
try:
await borg.send_message( # pylint:disable=E0602
Config.PRIVATE_GROUP_BOT_API_ID, # pylint:disable=E0602
"Set AFK mode to False"
)
except Exception as e: # pylint:disable=C0103,W0703
await borg.send_message( # pylint:disable=E0602
event.chat_id,
"Please set `PRIVATE_GROUP_BOT_API_ID` " + \
"for the proper functioning of afk functionality " + \
"in @FRIDAYSUPPORTOFFICIAL\nCheck pinned message for more info.\n\n `{}`".format(str(e)),
reply_to=event.message.id,
silent=True
)
await asyncio.sleep(5)
await shite.delete()
USER_AFK = {} # pylint:disable=E0602
afk_time = None # pylint:disable=E0602
@borg.on(events.NewMessage( # pylint:disable=E0602
incoming=True,
func=lambda e: bool(e.mentioned or e.is_private)
))
async def on_afk(event):
if event.fwd_from:
return
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global last_afk_message # pylint:disable=E0602
global afk_start
global afk_end
back_alivee = datetime.now()
afk_end = back_alivee.replace(microsecond=0)
if afk_start != {}:
total_afk_time = str((afk_end - afk_start))
afk_since = "**a while ago**"
current_message_text = event.message.message.lower()
if "afk" in current_message_text:
# userbot's should not reply to other userbot's
# https://core.telegram.org/bots/faq#why-doesn-39t-my-bot-see-messages-from-other-bots
return False
if USER_AFK and not (await event.get_sender()).bot: # pylint:disable=E0602
if afk_time: # pylint:disable=E0602
now = datetime.datetime.now()
datime_since_afk = now - afk_time # pylint:disable=E0602
time = float(datime_since_afk.seconds)
days = time // (24 * 3600)
time = time % (24 * 3600)
hours = time // 3600
time %= 3600
minutes = time // 60
time %= 60
seconds = time
if days == 1:
afk_since = "**Yesterday**"
elif days > 1:
if days > 6:
date = now + \
datetime.timedelta(
days=-days, hours=-hours, minutes=-minutes)
afk_since = date.strftime("%A, %Y %B %m, %H:%I")
else:
wday = now + datetime.timedelta(days=-days)
afk_since = wday.strftime('%A')
elif hours > 1:
afk_since = f"`{int(hours)}h{int(minutes)}m` **ago**"
elif minutes > 0:
afk_since = f"`{int(minutes)}m{int(seconds)}s` **ago**"
else:
afk_since = f"`{int(seconds)}s` **ago**"
msg = None
message_to_reply = f"__My Master Has Been Gone For__ `{total_afk_time}`\nWhere He Is: ~~ONLY ME KNOWS~~ " + \
f"\n\n__I promise I'll back in a few light years__\n**REASON**: {reason}" \
if reason \
else f"**Heya!**\n__I am currently unavailable. Since when, you ask? For {total_afk_time} I guess.__\n\nWhen will I be back? ~~Soon~~ __Whenever I feel like it__**( ಠ ʖ̯ ಠ)** "
msg = await event.reply(message_to_reply)
await asyncio.sleep(5)
if event.chat_id in last_afk_message: # pylint:disable=E0602
await last_afk_message[event.chat_id].delete() # pylint:disable=E0602
last_afk_message[event.chat_id] = msg # pylint:disable=E0602
| 40.643312
| 189
| 0.608212
|
5da0043977c725a2f1f0fb4fdc05242250aa1168
| 3,543
|
py
|
Python
|
dit/algorithms/prune_expand.py
|
Ejjaffe/dit
|
c9d206f03d1de5a0a298b1d0ea9d79ea5e789ee1
|
[
"BSD-3-Clause"
] | 1
|
2020-03-13T10:30:11.000Z
|
2020-03-13T10:30:11.000Z
|
dit/algorithms/prune_expand.py
|
Ejjaffe/dit
|
c9d206f03d1de5a0a298b1d0ea9d79ea5e789ee1
|
[
"BSD-3-Clause"
] | null | null | null |
dit/algorithms/prune_expand.py
|
Ejjaffe/dit
|
c9d206f03d1de5a0a298b1d0ea9d79ea5e789ee1
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Functions for pruning or expanding the sample space of a distribution.
This can be important when calculating meet and join random variables. It
is also important for the calculations of various PID quantities.
"""
from dit.samplespace import ScalarSampleSpace, SampleSpace, CartesianProduct
__all__ = (
'expanded_samplespace',
'pruned_samplespace',
)
def pruned_samplespace(d, sample_space=None):
"""
Returns a new distribution with pruned sample space.
The pruning is such that zero probability outcomes are removed.
Parameters
----------
d : distribution
The distribution used to create the pruned distribution.
sample_space : set
A list of outcomes with zero probability that should be kept in the
sample space. If `None`, then all outcomes with zero probability
will be removed.
Returns
-------
pd : distribution
The distribution with a pruned sample space.
"""
if sample_space is None:
sample_space = []
keep = set(sample_space)
outcomes = []
pmf = []
for o, p in d.zipped(mode='atoms'):
if not d.ops.is_null_exact(p) or o in keep:
outcomes.append(o)
pmf.append(p)
if d.is_joint():
sample_space = SampleSpace(outcomes)
else:
sample_space = ScalarSampleSpace(outcomes)
pd = d.__class__(outcomes, pmf,
sample_space=sample_space, base=d.get_base())
return pd
def expanded_samplespace(d, alphabets=None, union=True):
"""
Returns a new distribution with an expanded sample space.
Expand the sample space so that it is the Cartesian product of the
alphabets for each random variable. Note, only the effective alphabet of
each random variable is used. So if one index in an outcome only has the
value 1, then its alphabet is `[1]`, and not `[0, 1]` for example.
Parameters
----------
d : distribution
The distribution used to create the pruned distribution.
alphabets : list
A list of alphabets, with length equal to the outcome length in `d`.
Each alphabet specifies the alphabet to be used for a single index
random variable. The sample space of the new distribution will be the
Cartesian product of these alphabets.
union : bool
If True, then the alphabet for each random variable is unioned.
The unioned alphabet is then used for each random variable.
Returns
-------
ed : distribution
The distribution with an expanded sample space.
Notes
-----
The default constructor for Distribution will create a Cartesian product
sample space if no sample space is provided.
"""
joint = d.is_joint()
if alphabets is None:
# Note, we sort the alphabets now, so we are possibly changing the
# order of the original sample space.
alphabets = list(map(sorted, d.alphabet))
elif joint and len(alphabets) != d.outcome_length():
L = len(alphabets)
raise Exception("You need to provide {0} alphabets".format(L))
if joint and union:
alphabet = set.union(*map(set, alphabets))
alphabet = sorted(alphabet)
alphabets = [alphabet] * len(alphabets)
if joint:
sample_space = CartesianProduct(alphabets, d._product)
else:
sample_space = ScalarSampleSpace(alphabets)
ed = d.__class__(d.outcomes, d.pmf,
sample_space=sample_space, base=d.get_base())
return ed
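# Illustrative usage sketch (editorial addition, not part of the original module).
# It assumes the `dit` package is installed; the example distribution is made up.
if __name__ == '__main__':
    import dit
    d = dit.Distribution(['00', '01', '11'], [0.5, 0.25, 0.25])
    ed = expanded_samplespace(d)   # sample space becomes the product {'0','1'} x {'0','1'}
    pd = pruned_samplespace(ed)    # removes zero-probability atoms (like '10') from the sample space
    print(pd.outcomes)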
| 30.282051
| 77
| 0.661586
|
ee55d72ccce61398f79bfd69b9fbc88e9af21d45
| 2,324
|
py
|
Python
|
account/types.py
|
TravelChain/golos-ql
|
a2acad0b56d349f3811b2bd0fc8ec1ce3257156c
|
[
"MIT"
] | 5
|
2018-08-28T20:54:54.000Z
|
2022-02-09T21:21:53.000Z
|
account/types.py
|
TravelChain/golos-ql
|
a2acad0b56d349f3811b2bd0fc8ec1ce3257156c
|
[
"MIT"
] | null | null | null |
account/types.py
|
TravelChain/golos-ql
|
a2acad0b56d349f3811b2bd0fc8ec1ce3257156c
|
[
"MIT"
] | 2
|
2018-09-26T06:28:34.000Z
|
2018-11-20T20:14:00.000Z
|
from contextlib import suppress
from mongoengine.base.datastructures import BaseDict
import graphene
from graphene.relay import Node
from graphene_mongo import MongoengineObjectType
from graphene.types.generic import GenericScalar
from account.models import (
AccountModel,
AccountAuthorityModel
)
from common.utils import prepare_json
from common.types import GeometryObjectType
class AccountLocation(graphene.ObjectType):
properties = GenericScalar()
geometry = graphene.Field(GeometryObjectType)
def resolve_properties(self, info):
return self.get('properties', {})
def resolve_geometry(self, info):
return self.get('geometry', {})
class MapalaProfile(graphene.ObjectType):
avatar = graphene.String()
location = graphene.Field(AccountLocation)
def resolve_avatar(self, info):
return self.get('avatar')
def resolve_location(self, info):
return self.get('location', {})
class AccountProfile(graphene.ObjectType):
profile_image = graphene.String()
website = graphene.String()
cover_image = graphene.String()
def resolve_cover_image(self, info):
with suppress(KeyError):
return self['cover_image']
def resolve_website(self, info):
with suppress(KeyError):
return self['website']
def resolve_profile_image(self, info):
with suppress(KeyError):
return self['profile_image']
class AccountMeta(graphene.ObjectType):
profile = graphene.Field(AccountProfile)
mapala_profile = graphene.Field(MapalaProfile)
def resolve_mapala_profile(self, info):
return self.get('mapalaProfile', {})
def resolve_profile(self, info):
return self.get('profile', {})
class Account(MongoengineObjectType):
meta = graphene.Field(AccountMeta)
json_metadata = GenericScalar()
def resolve_json_metadata(self, info):
return prepare_json(self.json_metadata)
class Meta:
model = AccountModel
interfaces = (Node,)
def resolve_meta(self, info):
if isinstance(self.json_metadata, BaseDict):
return self.json_metadata
else:
return {}
class AccountAuthority(MongoengineObjectType):
class Meta:
model = AccountAuthorityModel
interfaces = (Node,)
| 25.538462
| 52
| 0.699656
|
0fcf8f922461d77dfda43ee8ee05275c8a55f75c
| 3,245
|
py
|
Python
|
tests/test_elbv2_actions.py
|
awslabs/aws-az-failure-chaostoolkit
|
95f682ce3e96efbf8c6aee58123b5dbbdfd441ee
|
[
"Apache-2.0"
] | 6
|
2021-10-20T13:01:00.000Z
|
2022-02-23T12:32:31.000Z
|
tests/test_elbv2_actions.py
|
awslabs/aws-az-failure-chaostoolkit
|
95f682ce3e96efbf8c6aee58123b5dbbdfd441ee
|
[
"Apache-2.0"
] | 1
|
2022-02-09T09:33:02.000Z
|
2022-03-03T14:03:09.000Z
|
tests/test_elbv2_actions.py
|
awslabs/aws-az-failure-chaostoolkit
|
95f682ce3e96efbf8c6aee58123b5dbbdfd441ee
|
[
"Apache-2.0"
] | null | null | null |
import os
from unittest.mock import MagicMock, patch
from azchaosaws.elbv2.actions import fail_az
@patch("azchaosaws.elbv2.actions.client", autospec=True)
def test_fail_az_alb(client):
mock_client = MagicMock()
client.return_value = mock_client
az = "ap-southeast-1a"
dry_run = False
state_path = "test_fail_az_alb.json"
load_balancer_name = "my-load-balancer"
resource_arn = "arn:aws:elasticloadbalancing:ap-southeast-1:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188"
subnet_ids = ["subnet-0ecac448", "subnet-15aaab61", "subnet-b61f49f0"]
remaining_subnets = subnet_ids[1:3]
mock_client.get_paginator = get_mock_paginate
mock_client.describe_tags.return_value = {
"TagDescriptions": [
{
"ResourceArn": resource_arn,
"Tags": [
{"Key": "AZ_FAILURE", "Value": "True"},
],
},
]
}
mock_client.describe_load_balancers.return_value = {
"LoadBalancers": [
{
"LoadBalancerArn": resource_arn,
"LoadBalancerName": load_balancer_name,
"Type": "application",
"State": {"Code": "active"},
"AvailabilityZones": [
{"ZoneName": "ap-southeast-1a", "SubnetId": "subnet-0ecac448"},
{"ZoneName": "ap-southeast-1b", "SubnetId": "subnet-15aaab61"},
{"ZoneName": "ap-southeast-1c", "SubnetId": "subnet-b61f49f0"},
],
},
]
}
fail_az(az=az, dry_run=dry_run, state_path=state_path)
mock_client.set_subnets.assert_called_with(
LoadBalancerArn=resource_arn, Subnets=remaining_subnets
)
os.remove(state_path)
def get_mock_paginate(operation_name):
return {
"describe_load_balancers": MagicMock(
paginate=MagicMock(
return_value=[
{
"LoadBalancers": [
{
"LoadBalancerArn": "arn:aws:elasticloadbalancing:ap-southeast-1:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188",
"LoadBalancerName": "my-load-balancer",
"Type": "application",
"AvailabilityZones": [
{
"ZoneName": "ap-southeast-1a",
"SubnetId": "subnet-0ecac448",
},
{
"ZoneName": "ap-southeast-1b",
"SubnetId": "subnet-15aaab61",
},
{
"ZoneName": "ap-southeast-1c",
"SubnetId": "subnet-b61f49f0",
},
],
},
]
}
]
)
),
}.get(operation_name, MagicMock())
| 36.460674
| 161
| 0.465639
|
bca830b79db5f37ebf95e675900cffc6ef211b57
| 88
|
py
|
Python
|
static/apps.py
|
tututou/reportonlie_os
|
527a0325ef8b5a555566a1ca55414f88e9831884
|
[
"Apache-2.0"
] | null | null | null |
static/apps.py
|
tututou/reportonlie_os
|
527a0325ef8b5a555566a1ca55414f88e9831884
|
[
"Apache-2.0"
] | null | null | null |
static/apps.py
|
tututou/reportonlie_os
|
527a0325ef8b5a555566a1ca55414f88e9831884
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
class StaticsConfig(AppConfig):
name = 'static'
| 14.666667
| 33
| 0.75
|
1da118faf46370f16834d57b394dfdae5a572ff2
| 1,051
|
py
|
Python
|
01-december/depth_rate.py
|
acatovic/AoC2021
|
aae151c73ab9c21c69e39d95126b9ffdd98e462e
|
[
"MIT"
] | null | null | null |
01-december/depth_rate.py
|
acatovic/AoC2021
|
aae151c73ab9c21c69e39d95126b9ffdd98e462e
|
[
"MIT"
] | null | null | null |
01-december/depth_rate.py
|
acatovic/AoC2021
|
aae151c73ab9c21c69e39d95126b9ffdd98e462e
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import sys
from typing import List
def count_num_depth_increases(data: List[int]) -> int:
curr_depth = data[0]
num_increases = 0
for i in range(1, len(data)):
if data[i] > curr_depth:
num_increases += 1
curr_depth = data[i]
return num_increases
def count_num_depth_window_increases(data: List[int], window_size: int = 3) -> int:
curr_window_sum = sum(data[:window_size])
num_increases = 0
for i in range(1, len(data) - window_size + 1):
next_window_sum = sum(data[i:i + window_size])
if next_window_sum > curr_window_sum:
num_increases += 1
curr_window_sum = next_window_sum
return num_increases
def load_sonar_data(path: str) -> List[int]:
data = []
with Path(path).open() as f:
for line in f:
data.append(int(line.strip()))
return data
def main():
path = sys.argv[1]
data = load_sonar_data(path)
print(count_num_depth_window_increases(data))
if __name__ == "__main__":
main()
| 28.405405
| 83
| 0.646051
|
ce002eed526bfbbd57021cdab0466d3dff9d8512
| 279
|
py
|
Python
|
src/flow.py
|
rodnover55/ha-flow-automation
|
66ada606720030020ab51c73ed70cb8d94659644
|
[
"MIT"
] | null | null | null |
src/flow.py
|
rodnover55/ha-flow-automation
|
66ada606720030020ab51c73ed70cb8d94659644
|
[
"MIT"
] | null | null | null |
src/flow.py
|
rodnover55/ha-flow-automation
|
66ada606720030020ab51c73ed70cb8d94659644
|
[
"MIT"
] | null | null | null |
class Flow:
def __init__(self, data, key):
self.__data = data
self.__key = key
def id(self) -> str:
return self.__data["flow_id"]
def validate_step(self, step) -> bool:
return True
def key(self) -> str:
return self.__key
| 21.461538
| 42
| 0.566308
|
ef04d6a54b381d0ad91f5f6f1fec9c494e0577df
| 16,695
|
py
|
Python
|
bin/parse_annotation.py
|
brianlee99/UVP
|
5b7ff26c09d84760d4220268f34fb4814848eb4a
|
[
"MIT"
] | null | null | null |
bin/parse_annotation.py
|
brianlee99/UVP
|
5b7ff26c09d84760d4220268f34fb4814848eb4a
|
[
"MIT"
] | null | null | null |
bin/parse_annotation.py
|
brianlee99/UVP
|
5b7ff26c09d84760d4220268f34fb4814848eb4a
|
[
"MIT"
] | null | null | null |
#! /usr/bin/python
import sys
import re
from string import join
""" The script accepts a SnpEff annotated VCF file and the sample ID name (string) as input options """
""" it parses files and creates a final annotation file that is in a ReseqTB mappable format """
"""
Author: Matthew Ezewudo
CPTR ReSeqTB Project - Critical Path Institute
"""
input1 = sys.argv[1]
input2 = sys.argv[2]
input3 = sys.argv[3]
position = ""
reference = ""
alternate = ""
annotation = ""
variant = ""
read_depth = ""
quality = ""
perc_alt = ""
nucleotide_change = ""
nuc_change = ""
transcript_pos = ""
amino_acid_change = ""
orig_aacid = ""
new_aacid = ""
codon_pos = ""
gene_name = ""
gene_id = ""
transcript = ""
annotation_details = ""
position1 = ""
reference1 = ""
alternate1 = ""
annotation1 = ""
variant1 = ""
read_depth1 = ""
quality1 = ""
perc_alt11 = ""
nucleotide_change1 = ""
transcript_pos1 = ""
amino_acid_change1 = ""
orig_aacid1 = ""
new_aacid1 = ""
codon_pos1 = ""
gene_name1 = ""
gene_id1 = ""
transcript1 = ""
annotation_details1 = ""
Block = False
(genez,genezid,start,stop,gene_anot,strand) = ([],[],[],[],[],[])
nuc_change = ""
dic = {'A':'T','T':'A','C':'G','G':'C'}
ref_comp = ""
alt_comp = ""
fh3 = open(input3,'r')
for lines in fh3:
lined = lines.rstrip("\r\n").split("\t")
if lines.startswith("H37Rv"):
continue
genez.append(lined[0])
genezid.append(lined[1])
start.append(lined[2])
stop.append(lined[3])
gene_anot.append(lined[4])
strand.append(lined[5])
fh1 = open(input1,'r')
print "Sample ID" + "\t" + "CHROM" + "\t" + "POS" + "\t" + "REF" + "\t" + "ALT" + "\t" + "Read Depth" + "\t" + "Quality" + "\t" + "Percent Alt allele" + "\t" + "Annotation" + "\t" + "Variant Type" + "\t" + "Nucleotide Change" + "\t" + "Position within CDS " + "\t" + "Amino acid change" + "\t" + "REF Amino acid" + "\t" + "ALT Amino Acid" + "\t" + "Codon Position" + "\t" "Gene name" + "\t" + "Gene ID" + "\t" + "Transcript ID" + "\t" + "Annotation details"
for lines in fh1:
if lines.startswith("#"):
continue
fields = lines.rstrip("\r\n").split("\t")
position = fields[1]
reference = fields[3]
alternate = fields[4]
quality = fields[5]
rarr = fields[9].split(":")
num_all_array = rarr[1].split(",")
num_all_2 = num_all_array[1]
read_depth = rarr[2]
num_all = rarr[3]
perc_alt1 = float(num_all_2)/float(read_depth)*100.0
if perc_alt1 > 100.0:
perc_alt1 = 100.0
perc_alt = "{0:.2f}".format(perc_alt1)
if float(read_depth) < 10.0:
continue
subfields = fields[7].split(";")
if subfields[-1].startswith("ANN"):
annot = subfields[-1]
else:
annot = subfields[-2]
subannot = annot.split(",")
smallannot = subannot[0].split("|")
if smallannot[2] == "MODIFIER":
for x in range(0,82):
if (int(start[x]) -1) < int(position) < (int(stop[x]) + 1):
annotation = gene_anot[x]
if genez[x] == 'rrs':
nuc_change = str((int(position)) - (int(start[x]) - 1))
gene_id = 'MTB000019'
nucleotide_change = "c." + nuc_change + reference + ">" + alternate
elif genez[x] == 'rrl':
nuc_change = str((int(position)) - (int(start[x]) - 1))
gene_id = 'MTB000020'
nucleotide_change = "c." + nuc_change + reference + ">" + alternate
elif genez[x] == 'crfA':
nuc_change = str((int(position)) - (int(start[x]) - 1))
gene_id = 'crfA'
nucleotide_change = "c." + nuc_change + reference + ">" + alternate
elif strand[x] == 'forward':
gene_id = genezid[x]
nuc_change = str((int(position)) - (int(stop[x]) + 1))
nucleotide_change = "c." + nuc_change + reference + ">" + alternate
elif strand[x] == 'reverse':
ref_comp = ""
alt_comp = ""
for char in reference:
ref_comp += dic[char]
for char in alternate:
alt_comp += dic[char]
gene_id = genezid[x]
nuc_change = str((int(start[x]) -1) - int(position))
nucleotide_change = "c." + nuc_change + ref_comp + ">" + alt_comp
gene_name = genez[x]
amino_acid_change = 'NA'
if len(fields[4]) > len(fields[3]):
if strand[x] == 'forward':
nucleotide_change = "c." + nuc_change + "_" + str(int(nuc_change) + 1) + "ins" + alternate[len(reference):]
if genez[x] == 'crfA':
transcript_pos = nuc_change + "-" + str(int(nuc_change) + 1)
elif strand[x] == 'reverse':
nucleotide_change = "c." + str(int(nuc_change) - 1) + "_" + nuc_change + "ins" + alt_comp[len(reference):][::-1]
variant = "Insertion"
elif len(fields[3]) > len(fields[4]):
if strand[x] == 'forward':
if len(reference) - len(alternate) == 1:
nucleotide_change = "c." + str(int(nuc_change) + len(alternate)) + "del" + reference[len(alternate):]
if genez[x] == 'crfA':
transcript_pos = str(int(nuc_change) + len(alternate))
else:
nucleotide_change = "c." + str(int(nuc_change) + len(alternate)) + "_" + str(int(nuc_change) + len(reference) - 1) + "del" + reference[len(alternate):]
if genez[x] == 'crfA':
transcript_pos = str(int(nuc_change) + len(alternate)) + "-" + str(int(nuc_change) + len(reference) - 1)
elif strand[x] == 'reverse':
if len(reference) - len(alternate) == 1:
nucleotide_change = "c." + str(int(nuc_change) - len(reference) - 1) + "del" + ref_comp[len(alternate):][::-1]
else:
nucleotide_change = "c." + str(int(nuc_change) - len(reference) - 1) + "_" + str(int(nuc_change) - len(alternate)) + "del" + ref_comp[len(alternate):][::-1]
variant = "Deletion"
else:
variant = "SNP"
transcript = 'NA'
if genez[x] == 'crfA':
transcript_pos = nuc_change
else:
transcript_pos = 'NA'
orig_aacid = 'NA'
new_aacid = 'NA'
codon_pos = 'NA'
annotation_details = ','.join(subannot[0:])
break
else:
annotation = 'Non-Coding'
variant = 'NA'
nucleotide_change = smallannot[9]
amino_acid_change = 'NA'
gene_name = 'NA'
gene_id = 'NA'
transcript = 'NA'
transcript_pos = 'NA'
orig_aacid = 'NA'
new_aacid = 'NA'
codon_pos = 'NA'
annotation_details = ','.join(subannot[1:])
if len(position1) != 0:
if Block == True:
print input2 + "\t" + fields[0] + "\t" + position1 + "\t" + reference1 + "\t" + alternate1 + "\t" + read_depth1 + "\t" + quality1 + "\t" + perc_alt11 + "\t" + annotation1 + "\t" + 'MNV' + "\t" + nucleotide_change1 + "\t" + transcript_pos1 + "\t" + amino_acid_change1 + "\t" + orig_aacid1 + "\t" + 'Block_Substitution' + "\t" + codon_pos1 + "\t" + gene_name1 + "\t" + gene_id1 + "\t" + transcript1 + "\t" + annotation_details1
Block = False
else:
print input2 + "\t" + fields[0] + "\t" + position1 + "\t" + reference1 + "\t" + alternate1 + "\t" + read_depth1 + "\t" + quality1 + "\t" + perc_alt11 + "\t" + annotation1 + "\t" + variant1 + "\t" + nucleotide_change1 + "\t" + transcript_pos1 + "\t" + amino_acid_change1 + "\t" + orig_aacid1 + "\t" + new_aacid1 + "\t" + codon_pos1 + "\t" + gene_name1 + "\t" + gene_id1 + "\t" + transcript1 + "\t" + annotation_details1
else:
if smallannot[10][2:5] == smallannot[10][-3:]:
annotation = 'Synonymous'
else:
annotation = 'Non-synonymous'
nucleotide_change = smallannot[9]
if len(smallannot[10]) < 1:
amino_acid_change = "NA"
else:
amino_acid_change = smallannot[10]
gene_name = smallannot[3]
gene_id = smallannot[4]
transcript = smallannot[6]
if gene_name == 'erm_37_':
gene_name = 'erm(37)'
annotation_details = ','.join(subannot[1:])
if 'del' in nucleotide_change or 'del' in amino_acid_change:
variant = 'Deletion'
elif 'ins' in nucleotide_change or 'ins' in amino_acid_change:
variant = 'Insertion'
elif 'dup' in nucleotide_change or 'dup' in amino_acid_change:
variant = 'Insertion'
else:
variant = 'SNP'
if variant == 'Insertion' or variant == 'Deletion':
new_aacid = 'NA'
if '_' in smallannot[9]:
array1 = smallannot[9].split("_")
po1 = array1[0].split(".")
pos1 = po1[1]
pos2 = re.findall(r'\d+', array1[1])[0]
transcript_pos = pos1 + "-" + pos2
else:
transcript_pos = re.findall(r'\d+', smallannot[9])[0]
if '_' in smallannot[10]:
array2 = smallannot[10].split("_")
po11 = array2[0].split(".")
orig_aacid = po11[1][0:3]
pos11 = po11[1][3:]
pos12 = re.findall(r'\d+', array2[1])[0]
codon_pos = pos11 + "-" + pos12
else:
if len(smallannot[10]) > 0:
codon_pos = re.findall(r'\d+', smallannot[10])[0]
orig_aacid = smallannot[10][2:5]
else:
codon_pos = "NA"
orig_aacid = "NA"
else :
orig_aacid = smallannot[10][2:5]
if '*' in smallannot[10] or '?' in smallannot[10] :
new_aacid = 'NA'
else:
new_aacid = smallannot[10][-3:]
transcript_pos = re.findall(r'\d+', smallannot[9])[0]
codon_pos = re.findall(r'\d+', smallannot[10])[0]
for x in range(0,82):
if (int(start[x]) -1) < int(position) < (int(stop[x]) + 1):
annotation = gene_anot[x]
if strand[x] == 'forward':
gene_id = genezid[x]
nuc_change = str((int(position)) - (int(stop[x]) + 1))
nucleotide_change = "c." + nuc_change + reference + ">" + alternate
elif strand[x] == 'reverse':
ref_comp = ""
alt_comp = ""
for char in reference:
ref_comp += dic[char]
for char in alternate:
alt_comp += dic[char]
gene_id = genezid[x]
nuc_change = str((int(start[x]) -1) - int(position))
nucleotide_change = "c." + nuc_change + ref_comp + ">" + alt_comp
gene_name = genez[x]
amino_acid_change = 'NA'
if len(fields[4]) > len(fields[3]):
if strand[x] == 'forward':
nucleotide_change = "c." + nuc_change + "_" + str(int(nuc_change) + 1) + "ins" + alternate[len(reference):]
elif strand[x] == 'reverse':
nucleotide_change = "c." + str(int(nuc_change) - 1) + "_" + nuc_change + "ins" + alt_comp[len(reference):][::-1]
variant = "Insertion"
elif len(fields[3]) > len(fields[4]):
if strand[x] == 'forward':
if len(reference) - len(alternate) == 1:
nucleotide_change = "c." + str(int(nuc_change) + len(alternate)) + "del" + reference[len(alternate):]
else:
nucleotide_change = "c." + str(int(nuc_change) + len(alternate)) + "_" + str(int(nuc_change) + len(reference) - 1) + "del" + reference[len(alternate):]
elif strand[x] == 'reverse':
if len(reference) - len(alternate) == 1:
nucleotide_change = "c." + str(int(nuc_change) - len(reference) - 1) + "del" + ref_comp[len(alternate):][::-1]
else:
nucleotide_change = "c." + str(int(nuc_change) - len(reference) - 1) + "_" + str(int(nuc_change) - len(alternate)) + "del" + ref_comp[len(alternate):][::-1]
variant = "Deletion"
else:
variant = "SNP"
transcript = 'NA'
transcript_pos = 'NA'
orig_aacid = 'NA'
new_aacid = 'NA'
codon_pos = 'NA'
annotation_details = ','.join(subannot[0:])
break
if len(position1) != 0:
if codon_pos == codon_pos1 and (int(position) - int(position1)) < 4 and float(perc_alt11) > 98.0 :
Block = True
print input2 + "\t" + fields[0] + "\t" + position1 + "\t" + reference1 + "\t" + alternate1 + "\t" + read_depth1 + "\t" + quality1 + "\t" + perc_alt11 + "\t" + annotation1 + "\t" + 'MNV' + "\t" + nucleotide_change1 + "\t" + transcript_pos1 + "\t" + amino_acid_change1 + "\t" + orig_aacid1 + "\t" + 'Block_Substitution' + "\t" + codon_pos1 + "\t" + gene_name1 + "\t" + gene_id1 + "\t" + transcript1 + "\t" + annotation_details1
elif Block == True:
print input2 + "\t" + fields[0] + "\t" + position1 + "\t" + reference1 + "\t" + alternate1 + "\t" + read_depth1 + "\t" + quality1 + "\t" + perc_alt11 + "\t" + annotation1 + "\t" + 'MNV' + "\t" + nucleotide_change1 + "\t" + transcript_pos1 + "\t" + amino_acid_change1 + "\t" + orig_aacid1 + "\t" + 'Block_Substitution' + "\t" + codon_pos1 + "\t" + gene_name1 + "\t" + gene_id1 + "\t" + transcript1 + "\t" + annotation_details1
Block = False
else:
print input2 + "\t" + fields[0] + "\t" + position1 + "\t" + reference1 + "\t" + alternate1 + "\t" + read_depth1 + "\t" + quality1 + "\t" + perc_alt11 + "\t" + annotation1 + "\t" + variant1 + "\t" + nucleotide_change1 + "\t" + transcript_pos1 + "\t" + amino_acid_change1 + "\t" + orig_aacid1 + "\t" + new_aacid1 + "\t" + codon_pos1 + "\t" + gene_name1 + "\t" + gene_id1 + "\t" + transcript1 + "\t" + annotation_details1
position1 = position
reference1 = reference
alternate1 = alternate
annotation1 = annotation
variant1 = variant
read_depth1 = read_depth
quality1 = quality
perc_alt11 = perc_alt
nucleotide_change1 = nucleotide_change
transcript_pos1 = transcript_pos
amino_acid_change1 = amino_acid_change
orig_aacid1 = orig_aacid
new_aacid1 = new_aacid
codon_pos1 = codon_pos
gene_name1 = gene_name
gene_id1 = gene_id
transcript1 = transcript
annotation_details1 = annotation_details
if Block == True:
print input2 + "\t" + fields[0] + "\t" + position1 + "\t" + reference1 + "\t" + alternate1 + "\t" + read_depth1 + "\t" + quality1 + "\t" + perc_alt11 + "\t" + annotation1 + "\t" + 'MNV' + "\t" + nucleotide_change1 + "\t" + transcript_pos1 + "\t" + amino_acid_change1 + "\t" + orig_aacid1 + "\t" + 'Block_Substitution' + "\t" + codon_pos1 + "\t" + gene_name1 + "\t" + gene_id1 + "\t" + transcript1 + "\t" + annotation_details1
else:
print input2 + "\t" + fields[0] + "\t" + position1 + "\t" + reference1 + "\t" + alternate1 + "\t" + read_depth1 + "\t" + quality1 + "\t" + perc_alt11 + "\t" + annotation1 + "\t" + variant1 + "\t" + nucleotide_change1 + "\t" + transcript_pos1 + "\t" + amino_acid_change1 + "\t" + orig_aacid1 + "\t" + new_aacid1 + "\t" + codon_pos1 + "\t" + gene_name1 + "\t" + gene_id1 + "\t" + transcript1 + "\t" + annotation_details1
| 49.835821
| 460
| 0.493741
|
b78806120f3d3a17a6938e0fdb5c05e41ac0f7d6
| 693
|
py
|
Python
|
src/captureImage.py
|
CquKeith/FFMPEG
|
e2d8a2415f6b08ad400bf3e7cd3386b8dce51b06
|
[
"MIT"
] | 2
|
2020-12-10T10:27:19.000Z
|
2021-04-19T07:13:07.000Z
|
src/captureImage.py
|
CquKeith/FFMPEG
|
e2d8a2415f6b08ad400bf3e7cd3386b8dce51b06
|
[
"MIT"
] | null | null | null |
src/captureImage.py
|
CquKeith/FFMPEG
|
e2d8a2415f6b08ad400bf3e7cd3386b8dce51b06
|
[
"MIT"
] | null | null | null |
# -- coding: utf-8 --
# Capture video frames
from FFmpegUtil import *
chunk_file_name = '118-120.avi'
def capture_image(input_file, out_file_dir):
'''
Capture every frame of input_file in the range [start, end] and save the frames to out_file_dir.
:param input_file: the video file to capture frames from
:param start: the frame at which capturing starts
:param end: the frame at which capturing ends
:param out_file_dir: the output directory
:return:
'''
cmd = "ffmpeg -y -i {} -vf fps=24 {}/%2d.jpeg".format(input_file,out_file_dir)
print(cmd)
run_command(cmd)
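# Editorial example: capture_image("../video/level1/118-120.avi", "../img/level1")
# builds and runs the command:
#   ffmpeg -y -i ../video/level1/118-120.avi -vf fps=24 ../img/level1/%2d.jpeg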
def main():
for level in range(1,6):
out_dir = "../img/level%s"%level
make_dirs_if_not_exists(out_dir)
capture_image("../video/level%s/%s"%(level,chunk_file_name),out_dir)
if __name__ == '__main__':
main()
| 23.1
| 82
| 0.649351
|
eba2a97e4f097939b6a864f5dab119da48af3131
| 4,087
|
py
|
Python
|
Assignment 3/perceptron.py
|
vamsi3/IITB-Machine-Learning-AI
|
f09f12a89658a746bbe8753e9ed3ed10b1a0e3cd
|
[
"MIT"
] | 3
|
2020-11-04T15:12:32.000Z
|
2022-01-28T12:21:38.000Z
|
Assignment 3/perceptron.py
|
vamsi3/CS335-IIT-Bombay
|
f09f12a89658a746bbe8753e9ed3ed10b1a0e3cd
|
[
"MIT"
] | null | null | null |
Assignment 3/perceptron.py
|
vamsi3/CS335-IIT-Bombay
|
f09f12a89658a746bbe8753e9ed3ed10b1a0e3cd
|
[
"MIT"
] | 2
|
2020-09-02T03:23:21.000Z
|
2022-02-07T05:52:52.000Z
|
# perceptron.py
# -------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
# Perceptron implementation
import util
import numpy as np
import sys
import random
PRINT = True
###### DON'T CHANGE THE SEEDS ##########
random.seed(42)
np.random.seed(42)
class PerceptronClassifier:
"""
Perceptron classifier.
Note that the variable 'datum' in this code refers to a counter of features
(not to a raw samples.Datum).
Note that this time around the weights are kept in a single Counter instead of a per-label list.
"""
def __init__( self, legalLabels, max_iterations):
self.legalLabels = legalLabels
self.type = "perceptron"
self.max_iterations = max_iterations
##################IMPORTANT######################
# The self.weights is just one instance of Counter unlike last time
#################################################
self.weights = util.Counter()
def setWeights(self, weights):
assert type(weights) == type(self.weights)
self.weights = weights
def train( self, trainingData, trainingLabels, sample_weights=None):
"""
The training loop for the perceptron passes through the training data several
times and updates the weight vector for each label based on classification errors.
See the assignment description for details.
Use the provided self.weights data structure so that
the classify method works correctly. Also, recall that a
datum is a counter from features to values for those features
(and thus represents a vector of values).
"""
self.features = trainingData[0].keys() # could be useful later
# DO NOT ZERO OUT YOUR WEIGHTS BEFORE STARTING TRAINING, OR
# THE AUTOGRADER WILL LIKELY DEDUCT POINTS.
if sample_weights is not None:
trainingData, trainingLabels = self.sample_data(trainingData, trainingLabels, sample_weights)
for iteration in range(self.max_iterations):
for i in range(len(trainingData)):
# "*** YOUR CODE HERE ***"
y, y_hat = trainingLabels[i], self.weights * trainingData[i]
if y * y_hat <= 0:
y_times_x = trainingData[i].copy()
y_times_x.mulAll(y)
self.weights += y_times_x
# util.raiseNotDefined()
def sample_data(self, trainingData, trainingLabels, sample_weights):
# "*** YOUR CODE HERE ***"
n = len(trainingData)
sample_indices = np.random.choice(n, size=int(n * 1.1), replace=True, p=sample_weights)
# Wrap in list() so the sampled data supports len() and repeated iteration in train().
sample_training_data = list(map(trainingData.__getitem__, sample_indices))
sample_training_labels = list(map(trainingLabels.__getitem__, sample_indices))
return sample_training_data, sample_training_labels
# util.raiseNotDefined()
def classify(self, data ):
"""
Classifies each datum as the label that most closely matches the prototype vector
for that label. See the assignment description for details.
Note that this time around the labels are just -1 and 1.
Recall that a datum is a util.counter.
"""
guesses = []
vectors = util.Counter()
for datum in data:
guess = int(np.sign(self.weights * datum))
if guess == 0:
guess = np.random.choice(self.legalLabels)
guesses.append(guess)
return guesses
| 38.196262
| 105
| 0.643993
|
f573ab151673acdfd6c7c5e63bc88f63e0974aca
| 762
|
py
|
Python
|
desktop/core/ext-py/Babel-0.9.6/babel/messages/tests/plurals.py
|
vinaymundada27/Hue
|
7bffb33bbe7cfa34d340241c4ba3b19476211b2a
|
[
"Apache-2.0"
] | 19
|
2015-05-01T19:59:03.000Z
|
2021-12-09T08:03:16.000Z
|
desktop/core/ext-py/Babel-0.9.6/babel/messages/tests/plurals.py
|
vinaymundada27/Hue
|
7bffb33bbe7cfa34d340241c4ba3b19476211b2a
|
[
"Apache-2.0"
] | 1
|
2018-01-03T15:26:49.000Z
|
2018-01-03T15:26:49.000Z
|
desktop/core/ext-py/Babel-0.9.6/babel/messages/tests/plurals.py
|
vinaymundada27/Hue
|
7bffb33bbe7cfa34d340241c4ba3b19476211b2a
|
[
"Apache-2.0"
] | 30
|
2015-03-25T19:40:07.000Z
|
2021-05-28T22:59:26.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
import doctest
import unittest
from babel.messages import plurals
def suite():
suite = unittest.TestSuite()
suite.addTest(doctest.DocTestSuite(plurals))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 29.307692
| 68
| 0.724409
|
9b03b182cf05795cdf9c33a51ba752c1baaead91
| 943
|
py
|
Python
|
modin/core/dataframe/base/exchange/dataframe_protocol/__init__.py
|
yizx-1017/modin
|
2eee697135b30a9694c202456db0635c52c9e6c9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
modin/core/dataframe/base/exchange/dataframe_protocol/__init__.py
|
yizx-1017/modin
|
2eee697135b30a9694c202456db0635c52c9e6c9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
modin/core/dataframe/base/exchange/dataframe_protocol/__init__.py
|
yizx-1017/modin
|
2eee697135b30a9694c202456db0635c52c9e6c9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Base Modin Dataframe functionality related to the dataframe exchange protocol.
See more in https://data-apis.org/dataframe-protocol/latest/index.html.
"""
| 49.631579
| 87
| 0.783669
|
cbb95119c268180b522de3c2afda1d836e78e7dc
| 4,275
|
py
|
Python
|
src/vws/exceptions/vws_exceptions.py
|
admdev8/vws-python
|
1ef2492e3e9f860fe42768898cc69390bd295d06
|
[
"MIT"
] | 7
|
2017-01-05T09:05:44.000Z
|
2020-05-14T06:41:47.000Z
|
src/vws/exceptions/vws_exceptions.py
|
adamtheturtle/vws-python
|
1afcba0ed46c0e82b2d80d5fe06781d909b468a4
|
[
"MIT"
] | 665
|
2016-12-14T23:03:53.000Z
|
2020-05-14T21:22:39.000Z
|
src/vws/exceptions/vws_exceptions.py
|
Smirenost/vws-python
|
1ef2492e3e9f860fe42768898cc69390bd295d06
|
[
"MIT"
] | 5
|
2020-08-17T15:18:35.000Z
|
2021-05-21T08:50:41.000Z
|
"""
Exceptions raised when Vuforia returns a response with a result code matching
one of those documented at
https://library.vuforia.com/articles/Solution/How-To-Use-the-Vuforia-Web-Services-API#How-To-Interperete-VWS-API-Result-Codes.
"""
import json
from urllib.parse import urlparse
from vws.exceptions.base_exceptions import VWSException
class UnknownTarget(VWSException):
"""
Exception raised when Vuforia returns a response with a result code
'UnknownTarget'.
"""
@property
def target_id(self) -> str:
"""
The unknown target ID.
"""
path = urlparse(self.response.url).path
# Every HTTP path which can raise this error is in the format
# `/something/{target_id}`.
return path.split(sep='/', maxsplit=2)[-1]
class Fail(VWSException):
"""
Exception raised when Vuforia returns a response with a result code
'Fail'.
"""
class BadImage(VWSException):
"""
Exception raised when Vuforia returns a response with a result code
'BadImage'.
"""
class AuthenticationFailure(VWSException):
"""
Exception raised when Vuforia returns a response with a result code
'AuthenticationFailure'.
"""
# See https://github.com/VWS-Python/vws-python/issues/822.
class RequestQuotaReached(VWSException): # pragma: no cover
"""
Exception raised when Vuforia returns a response with a result code
'RequestQuotaReached'.
"""
class TargetStatusProcessing(VWSException):
"""
Exception raised when Vuforia returns a response with a result code
'TargetStatusProcessing'.
"""
@property
def target_id(self) -> str:
"""
The processing target ID.
"""
path = urlparse(self.response.url).path
# Every HTTP path which can raise this error is in the format
# `/something/{target_id}`.
return path.split(sep='/', maxsplit=2)[-1]
# This is not simulated by the mock.
class DateRangeError(VWSException): # pragma: no cover
"""
Exception raised when Vuforia returns a response with a result code
'DateRangeError'.
"""
# This is not simulated by the mock.
class TargetQuotaReached(VWSException): # pragma: no cover
"""
Exception raised when Vuforia returns a response with a result code
'TargetQuotaReached'.
"""
# This is not simulated by the mock.
class ProjectSuspended(VWSException): # pragma: no cover
"""
Exception raised when Vuforia returns a response with a result code
'ProjectSuspended'.
"""
# This is not simulated by the mock.
class ProjectHasNoAPIAccess(VWSException): # pragma: no cover
"""
Exception raised when Vuforia returns a response with a result code
'ProjectHasNoAPIAccess'.
"""
class ProjectInactive(VWSException):
"""
Exception raised when Vuforia returns a response with a result code
'ProjectInactive'.
"""
class MetadataTooLarge(VWSException):
"""
Exception raised when Vuforia returns a response with a result code
'MetadataTooLarge'.
"""
class RequestTimeTooSkewed(VWSException):
"""
Exception raised when Vuforia returns a response with a result code
'RequestTimeTooSkewed'.
"""
class TargetNameExist(VWSException):
"""
Exception raised when Vuforia returns a response with a result code
'TargetNameExist'.
"""
@property
def target_name(self) -> str:
"""
The target name which already exists.
"""
response_body = self.response.request.body or b''
request_json = json.loads(response_body)
return str(request_json['name'])
class ImageTooLarge(VWSException):
"""
Exception raised when Vuforia returns a response with a result code
'ImageTooLarge'.
"""
class TargetStatusNotSuccess(VWSException):
"""
Exception raised when Vuforia returns a response with a result code
'TargetStatusNotSuccess'.
"""
@property
def target_id(self) -> str:
"""
The unknown target ID.
"""
path = urlparse(self.response.url).path
# Every HTTP path which can raise this error is in the format
# `/something/{target_id}`.
return path.split(sep='/', maxsplit=2)[-1]
| 25.598802
| 126
| 0.670175
|
f2f00b1aa5be124e00cf8b63decffae15a057450
| 5,257
|
py
|
Python
|
tests/persistence/person_dao_spark_test.py
|
pydev-bootcamp/python-etl
|
5db16bdf653861e14e6b30359622d83e45ca4f88
|
[
"Unlicense"
] | null | null | null |
tests/persistence/person_dao_spark_test.py
|
pydev-bootcamp/python-etl
|
5db16bdf653861e14e6b30359622d83e45ca4f88
|
[
"Unlicense"
] | null | null | null |
tests/persistence/person_dao_spark_test.py
|
pydev-bootcamp/python-etl
|
5db16bdf653861e14e6b30359622d83e45ca4f88
|
[
"Unlicense"
] | null | null | null |
"""
Unit tests for PersonDao
"""
from datetime import datetime
import os
from os.path import dirname, abspath, join
from pytest import raises
from tempfile import mkstemp
from typing import List, Optional, TextIO
from textwrap import dedent
from manage_accounts.model.person import Person
from manage_accounts.persistence.person_dao_spark import PersonDaoSpark
# from pprint import pprint # pretty-print Python data structures
dao: PersonDaoSpark
temp_file: Optional[TextIO]
temp_file_handle: int
temp_file_path: str
data_file_path: str = join(dirname(abspath(__file__)),
"person_dao_spark_test.jsonl")
def setup_function() -> None:
# create temp file
global temp_file_handle, temp_file_path, temp_file
temp_file_handle, temp_file_path = mkstemp(text=True)
temp_file = open(temp_file_path, "w")
def teardown_function() -> None:
global dao, temp_file_handle, temp_file
if dao:
dao.close()
if temp_file: # else file was closed in init_test_data()
temp_file.close()
if temp_file_handle: # remove temp file
os.close(temp_file_handle)
os.remove(temp_file_path)
def init_test_data(test_data: str) -> None:
global temp_file
temp_file.write(dedent(test_data))
temp_file.close()
temp_file = None
def test_find_all_args_supplied() -> None:
global dao
dao = PersonDaoSpark(data_file_path)
# dao.df.printSchema()
results = [person for person in dao.find(
"Vivien", "Theodore", "Thomas")]
assert 1 == len(results)
p = results[0]
assert isinstance(p, Person)
assert (1, "Vivien", "Theodore", "Thomas") == \
(p.id, p.given, p.middle, p.family)
assert p.created_time < datetime.utcnow()
def test_find_given_arg_only() -> None:
global dao
dao = PersonDaoSpark(data_file_path)
results = [person for person in dao.find("Elizabeth")]
assert 1 == len(results)
p = results[0]
assert isinstance(p, Person)
assert (2, "Elizabeth", "", "Blackwell") == \
(p.id, p.given, p.middle, p.family)
assert p.created_time < datetime.utcnow()
def test_find_given_arg_only_two_results() -> None:
global dao
dao = PersonDaoSpark(data_file_path)
results = [person for person in dao.find("Thomas")]
assert 2 == len(results)
results = sorted(results, key=lambda person: person.id)
assert [(p.id, p.given, p.middle, p.family) for p in results] == \
[(4, "Thomas", "", "Addison"),
(5, "Thomas", "", "Sydenham")]
def test_find_several_queries() -> None:
global dao
dao = PersonDaoSpark(data_file_path)
# next(iter(...)) performs one iteration of the Iterable argument
p: Person = next(iter(dao.find("Vivien", "Theodore", "Thomas")))
assert ("Vivien", "Theodore", "Thomas") == (p.given, p.middle, p.family)
p = next(iter(dao.find("Elizabeth", family="Blackwell")))
assert ("Elizabeth", "", "Blackwell") == (p.given, p.middle, p.family)
p = next(iter(dao.find("Hippocrates")))
assert ("Hippocrates", "", "") == (p.given, p.middle, p.family)
def test_find_person_not_present() -> None:
global dao
dao = PersonDaoSpark(data_file_path)
results: List[Person] = [person for person in dao.find("NotThere")]
assert 0 == len(results)
def test_find_no_args_raises_exception() -> None:
global dao
dao = PersonDaoSpark(data_file_path)
with raises(ValueError, match=r"arguments.*empty"):
next(iter(dao.find()))
# find() is a generator function, so it won't be executed unless
# it's used for iteration
def test_find_by_id_success() -> None:
global dao
dao = PersonDaoSpark(data_file_path)
person: Person = dao.find_by_id(3)
assert "Hippocrates" == person.given
def test_find_by_id_not_present() -> None:
global dao
dao = PersonDaoSpark(data_file_path)
assert dao.find_by_id(0) is None
def test_find_by_id_json() -> None:
global dao
init_test_data("""
{"id": 1, "given": "Vivien", "middle": "Theodore", "family": "Thomas", "created_time": 1576109811}
{"id": 2, "given": "Hippocrates", "created_time": 1576109813}
{"id": 3, "given": "Thomas", "family": "Addison", "created_time": 1576109814}
""")
dao = PersonDaoSpark(temp_file_path)
person: Person = dao.find_by_id(2)
assert "Hippocrates" == person.given
def test_find_by_id_csv() -> None:
global dao
init_test_data("""\
1,Vivien,Theodore,Thomas,1576109811
2,Hippocrates,,1576109812
3,Thomas,,Addison,1576109813
""")
dao = PersonDaoSpark(temp_file_path, "csv")
person: Person = dao.find_by_id(2)
assert "Hippocrates" == person.given
def test_find_by_id_duplicate_id_raises_exception() -> None:
global dao
init_test_data("""\
{"id": 1, "given": "Vivien", "middle": "Theodore", "family": "Thomas", "created_time": 1576109811}
{"id": 2, "given": "Hippocrates", "created_time": 1576109813}
{"id": 2, "given": "Thomas", "family": "Addison", "created_time": 1576109814}
""")
dao = PersonDaoSpark(temp_file_path)
with raises(ValueError, match=r"duplicate.*([Ii][Dd]|[Kk]ey)"):
dao.find_by_id(2)
| 29.700565
| 106
| 0.658551
|
8975c9a3ae3517b87a267340eadcfc7964ebcb7b
| 10,805
|
py
|
Python
|
examples/models.py
|
smit14/SentEval
|
bbf74ee8b918bc359d9494021a737229116cb8f0
|
[
"BSD-3-Clause"
] | null | null | null |
examples/models.py
|
smit14/SentEval
|
bbf74ee8b918bc359d9494021a737229116cb8f0
|
[
"BSD-3-Clause"
] | null | null | null |
examples/models.py
|
smit14/SentEval
|
bbf74ee8b918bc359d9494021a737229116cb8f0
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
This file contains the definition of encoders used in https://arxiv.org/pdf/1705.02364.pdf
"""
import numpy as np
import time
import torch
import torch.nn as nn
import json
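# InferSent: a bidirectional-LSTM sentence encoder with max/mean pooling. encode() batches sentences,
# sorts them by decreasing length for packing, and restores the original order afterwards.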
class InferSent(nn.Module):
def __init__(self, config):
super(InferSent, self).__init__()
self.bsize = config['bsize']
self.word_emb_dim = config['word_emb_dim']
self.enc_lstm_dim = config['enc_lstm_dim']
self.pool_type = config['pool_type']
self.dpout_model = config['dpout_model']
self.version = 1 if 'version' not in config else config['version']
self.enc_lstm = nn.LSTM(self.word_emb_dim, self.enc_lstm_dim, 1,
bidirectional=True, dropout=self.dpout_model)
assert self.version in [1, 2]
if self.version == 1:
self.bos = '<s>'
self.eos = '</s>'
self.max_pad = True
self.moses_tok = False
elif self.version == 2:
self.bos = '<p>'
self.eos = '</p>'
self.max_pad = False
self.moses_tok = True
def is_cuda(self):
# either all weights are on cpu or they are on gpu
return self.enc_lstm.bias_hh_l0.data.is_cuda
def forward(self, sent_tuple):
# sent_len: [max_len, ..., min_len] (bsize)
# sent: (seqlen x bsize x worddim)
sent, sent_len = sent_tuple
# Sort by length (keep idx)
sent_len_sorted, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
sent_len_sorted = sent_len_sorted.copy()
idx_unsort = np.argsort(idx_sort)
idx_sort = torch.from_numpy(idx_sort).cuda() if self.is_cuda() \
else torch.from_numpy(idx_sort)
sent = sent.index_select(1, idx_sort)
# Handling padding in Recurrent Networks
sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len_sorted)
sent_output = self.enc_lstm(sent_packed)[0] # seqlen x batch x 2*nhid
sent_output = nn.utils.rnn.pad_packed_sequence(sent_output)[0]
# Un-sort by length
idx_unsort = torch.from_numpy(idx_unsort).cuda() if self.is_cuda() \
else torch.from_numpy(idx_unsort)
sent_output = sent_output.index_select(1, idx_unsort)
# Pooling
if self.pool_type == "mean":
sent_len = torch.FloatTensor(sent_len.copy()).unsqueeze(1).cuda()
emb = torch.sum(sent_output, 0).squeeze(0)
emb = emb / sent_len.expand_as(emb)
elif self.pool_type == "max":
if not self.max_pad:
sent_output[sent_output == 0] = -1e9
emb = torch.max(sent_output, 0)[0]
if emb.ndimension() == 3:
emb = emb.squeeze(0)
assert emb.ndimension() == 2
return emb
def set_w2v_path(self, w2v_path):
self.w2v_path = w2v_path
def get_word_dict(self, sentences, tokenize=True):
# create vocab of words
word_dict = {}
sentences = [s.split() if not tokenize else self.tokenize(s) for s in sentences]
for sent in sentences:
for word in sent:
if word not in word_dict:
word_dict[word] = ''
word_dict[self.bos] = ''
word_dict[self.eos] = ''
return word_dict
def get_w2v(self, word_dict):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with w2v vectors
word_vec = {}
with open(self.w2v_path, encoding='utf-8') as f:
for line in f:
word, vec = line.split(' ', 1)
if word in word_dict:
word_vec[word] = np.fromstring(vec, sep=' ')
print('Found %s(/%s) words with w2v vectors' % (len(word_vec), len(word_dict)))
return word_vec
##############################################################################################
def get_w2v_small(self, word_dict):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with w2v vectors
word_vec = {}
glove_small = json.load(open(self.w2v_path))
for word in glove_small.keys():
if word in word_dict:
word_vec[word] = np.array(glove_small[word])
print('Found %s(/%s) words with w2v vectors' % (len(word_vec), len(word_dict)))
return word_vec
##############################################################################################
def get_w2v_k(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with the first K w2v vectors
k = 0
word_vec = {}
with open(self.w2v_path, encoding='utf-8') as f:
for line in f:
word, vec = line.split(' ', 1)
if k <= K:
word_vec[word] = np.fromstring(vec, sep=' ')
k += 1
if k > K:
if word in [self.bos, self.eos]:
word_vec[word] = np.fromstring(vec, sep=' ')
if k > K and all([w in word_vec for w in [self.bos, self.eos]]):
break
return word_vec
def build_vocab(self, sentences, tokenize=True, is_small = False):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
word_dict = self.get_word_dict(sentences, tokenize)
####################################################################
self.word_vec = self.get_w2v(word_dict)
if is_small:
self.word_vec = self.get_w2v_small(word_dict)
####################################################################
print('Vocab size : %s' % (len(self.word_vec)))
# build w2v vocab with k most frequent words
def build_vocab_k_words(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
self.word_vec = self.get_w2v_k(K)
print('Vocab size : %s' % (K))
def update_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'warning : w2v path not set'
assert hasattr(self, 'word_vec'), 'build_vocab before updating it'
word_dict = self.get_word_dict(sentences, tokenize)
# keep only new words
for word in self.word_vec:
if word in word_dict:
del word_dict[word]
# update vocabulary
if word_dict:
new_word_vec = self.get_w2v(word_dict)
self.word_vec.update(new_word_vec)
else:
new_word_vec = []
print('New vocab size : %s (added %s words)'% (len(self.word_vec), len(new_word_vec)))
def get_batch(self, batch):
# sent in batch in decreasing order of lengths
# batch: (bsize, max_len, word_dim)
embed = np.zeros((len(batch[0]), len(batch), self.word_emb_dim))
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = self.word_vec[batch[i][j]]
return torch.FloatTensor(embed)
def tokenize(self, s):
from nltk.tokenize import word_tokenize
if self.moses_tok:
s = ' '.join(word_tokenize(s))
s = s.replace(" n't ", "n 't ") # HACK to get ~MOSES tokenization
return s.split()
else:
return word_tokenize(s)
def prepare_samples(self, sentences, bsize, tokenize, verbose):
sentences = [[self.bos] + s.split() + [self.eos] if not tokenize else
[self.bos] + self.tokenize(s) + [self.eos] for s in sentences]
n_w = np.sum([len(x) for x in sentences])
# filters words without w2v vectors
for i in range(len(sentences)):
s_f = [word for word in sentences[i] if word in self.word_vec]
if not s_f:
import warnings
warnings.warn('No words in "%s" (idx=%s) have w2v vectors. \
Replacing by "</s>"..' % (sentences[i], i))
s_f = [self.eos]
sentences[i] = s_f
lengths = np.array([len(s) for s in sentences])
n_wk = np.sum(lengths)
if verbose:
print('Nb words kept : %s/%s (%.1f%s)' % (
n_wk, n_w, 100.0 * n_wk / n_w, '%'))
# sort by decreasing length
lengths, idx_sort = np.sort(lengths)[::-1], np.argsort(-lengths)
sentences = np.array(sentences)[idx_sort]
return sentences, lengths, idx_sort
def encode(self, sentences, bsize=64, tokenize=True, verbose=False):
tic = time.time()
sentences, lengths, idx_sort = self.prepare_samples(
sentences, bsize, tokenize, verbose)
embeddings = []
for stidx in range(0, len(sentences), bsize):
batch = self.get_batch(sentences[stidx:stidx + bsize])
if self.is_cuda():
batch = batch.cuda()
with torch.no_grad():
batch = self.forward((batch, lengths[stidx:stidx + bsize])).data.cpu().numpy()
embeddings.append(batch)
embeddings = np.vstack(embeddings)
# unsort
idx_unsort = np.argsort(idx_sort)
embeddings = embeddings[idx_unsort]
if verbose:
print('Speed : %.1f sentences/s (%s mode, bsize=%s)' % (
len(embeddings)/(time.time()-tic),
'gpu' if self.is_cuda() else 'cpu', bsize))
return embeddings
def visualize(self, sent, tokenize=True):
sent = sent.split() if not tokenize else self.tokenize(sent)
sent = [[self.bos] + [word for word in sent if word in self.word_vec] + [self.eos]]
if ' '.join(sent[0]) == '%s %s' % (self.bos, self.eos):
import warnings
warnings.warn('No words in "%s" have w2v vectors. Replacing \
by "%s %s"..' % (sent, self.bos, self.eos))
batch = self.get_batch(sent)
if self.is_cuda():
batch = batch.cuda()
output = self.enc_lstm(batch)[0]
output, idxs = torch.max(output, 0)
# output, idxs = output.squeeze(), idxs.squeeze()
idxs = idxs.data.cpu().numpy()
argmaxs = [np.sum((idxs == k)) for k in range(len(sent[0]))]
# visualize model
import matplotlib.pyplot as plt
x = range(len(sent[0]))
y = [100.0 * n / np.sum(argmaxs) for n in argmaxs]
plt.xticks(x, sent[0], rotation=45)
plt.bar(x, y)
plt.ylabel('%')
plt.title('Visualisation of word importance')
plt.show()
return output, idxs
| 37.77972
| 98
| 0.547802
|
c8265dc035ba352b92fe2f6338520b5f85cff6b3
| 6,788
|
py
|
Python
|
oneflow/compatible_single_client_python/test/ops/test_argwhere.py
|
xcnick/oneflow
|
7b786b27069dec35d2493256011e773988c91f56
|
[
"Apache-2.0"
] | null | null | null |
oneflow/compatible_single_client_python/test/ops/test_argwhere.py
|
xcnick/oneflow
|
7b786b27069dec35d2493256011e773988c91f56
|
[
"Apache-2.0"
] | null | null | null |
oneflow/compatible_single_client_python/test/ops/test_argwhere.py
|
xcnick/oneflow
|
7b786b27069dec35d2493256011e773988c91f56
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import unittest
from collections import OrderedDict
import os
from oneflow.compatible import single_client as flow
from test_util import GenArgDict
def _np_dtype_to_of_dtype(np_dtype):
if np_dtype == np.float32:
return flow.float32
elif np_dtype == np.int32:
return flow.int32
elif np_dtype == np.int64:
return flow.int64
elif np_dtype == np.int8:
return flow.int8
else:
raise NotImplementedError
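# Random test inputs: float inputs have roughly half their entries zeroed and integer inputs are drawn
# from {0, 1}, so argwhere has a realistic mix of zero and non-zero entries to locate.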
def _random_input(shape, dtype):
if dtype == np.float32:
rand_ = np.random.random_sample(shape).astype(np.float32)
rand_[np.nonzero(rand_ < 0.5)] = 0.0
return rand_
elif dtype == np.int32:
return np.random.randint(low=0, high=2, size=shape).astype(np.int32)
elif dtype == np.int8:
return np.random.randint(low=0, high=2, size=shape).astype(np.int8)
else:
raise NotImplementedError
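# Build and run a OneFlow global function that applies flow.argwhere to x on the requested device(s),
# using the mirrored view when dynamic=True and the consistent view otherwise.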
def _of_argwhere(x, index_dtype, device_type="gpu", device_num=1, dynamic=False):
data_type = _np_dtype_to_of_dtype(x.dtype)
out_data_type = _np_dtype_to_of_dtype(index_dtype)
flow.clear_default_session()
if device_type == "gpu":
flow.config.gpu_device_num(device_num)
elif device_type == "cpu":
flow.config.cpu_device_num(device_num)
else:
raise ValueError
assert device_num > 0
func_config = flow.FunctionConfig()
func_config.default_data_type(data_type)
func_config.default_placement_scope(
flow.scope.placement(device_type, "0:0-{}".format(device_num - 1))
)
if dynamic is True:
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function("predict", function_config=func_config)
def argwhere_fn(
x: flow.typing.ListNumpy.Placeholder(x.shape, dtype=data_type)
) -> flow.typing.ListNumpy:
return flow.argwhere(x, dtype=out_data_type)
return argwhere_fn([x] * device_num)[0]
else:
func_config.default_logical_view(flow.scope.consistent_view())
@flow.global_function("predict", function_config=func_config)
def argwhere_fn(
x: flow.typing.Numpy.Placeholder(x.shape, dtype=data_type)
) -> flow.typing.ListNumpy:
return flow.argwhere(x, dtype=out_data_type)
return argwhere_fn(x)[0]
def _compare_with_np(
test_case,
shape,
value_dtype,
index_dtype,
device_type="gpu",
device_num=1,
dynamic=False,
verbose=False,
):
if verbose:
print("shape:", shape)
print("value_dtype:", value_dtype)
print("index_dtype:", index_dtype)
print("device_type:", device_type)
print("device_num:", device_num)
print("dynamic:", dynamic)
x = _random_input(shape, value_dtype)
y = np.argwhere(x)
of_y = _of_argwhere(
x, index_dtype, device_type=device_type, device_num=device_num, dynamic=dynamic
)
if verbose is True:
print("input:", x)
print("np result:", y)
print("of result:", of_y)
test_case.assertTrue(np.array_equal(y, of_y))
def _dynamic_multi_iter_compare(
test_case,
iter_num,
shape,
value_dtype,
index_dtype,
device_type="gpu",
verbose=False,
):
x = [_random_input(shape, value_dtype) for _ in range(iter_num)]
y = [np.argwhere(x_) for x_ in x]
data_type = _np_dtype_to_of_dtype(value_dtype)
out_data_type = _np_dtype_to_of_dtype(index_dtype)
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(data_type)
func_config.default_placement_scope(flow.scope.placement(device_type, "0:0"))
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function("predict", function_config=func_config)
def argwhere_fn(
x: flow.typing.Numpy.Placeholder(tuple(shape), dtype=data_type)
) -> flow.typing.ListNumpy:
return flow.argwhere(x, dtype=out_data_type)
results = []
for x_ in x:
y_ = argwhere_fn(x_)[0]
results.append(y_)
for i, result in enumerate(results):
test_case.assertTrue(np.array_equal(result, y[i]))
@flow.unittest.skip_unless_1n1d()
class TestArgwhere(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_argwhere(test_case):
arg_dict = OrderedDict()
arg_dict["shape"] = [(10), (30, 4), (8, 256, 20)]
arg_dict["value_dtype"] = [np.float32, np.int32, np.int8]
arg_dict["index_dtype"] = [np.int32, np.int64]
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["dynamic"] = [True, False]
arg_dict["verbose"] = [False]
for arg in GenArgDict(arg_dict):
_compare_with_np(test_case, **arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_argwhere_multi_iter(test_case):
arg_dict = OrderedDict()
arg_dict["iter_num"] = [2]
arg_dict["shape"] = [(20, 4)]
arg_dict["value_dtype"] = [np.float32, np.int32, np.int8]
arg_dict["index_dtype"] = [np.int32, np.int64]
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["verbose"] = [False]
for arg in GenArgDict(arg_dict):
_dynamic_multi_iter_compare(test_case, **arg)
@flow.unittest.skip_unless_1n4d()
class TestArgwhere4D(flow.unittest.TestCase):
# @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
# TODO(zhangwenxiao, jiangxuefei): refine in multi-client
@unittest.skipIf(True, "skip for now because of single-client tensor_list removed")
def test_argwhere(test_case):
arg_dict = OrderedDict()
arg_dict["shape"] = [(10, 5)]
arg_dict["value_dtype"] = [np.float32, np.int32, np.int8]
arg_dict["index_dtype"] = [np.int32, np.int64]
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["device_num"] = [4]
arg_dict["dynamic"] = [True]
arg_dict["verbose"] = [False]
for arg in GenArgDict(arg_dict):
_compare_with_np(test_case, **arg)
if __name__ == "__main__":
unittest.main()
| 32.792271
| 87
| 0.669564
|
ae53f0b78e8ec3aec388c4db65c0cbc1238d5b12
| 9,235
|
py
|
Python
|
client_sdk_python/providers/eth_tester/middleware.py
|
awake006/client-sdk-python
|
34393e417f74a65f04b643b26d8f9d2ccb0c886e
|
[
"MIT"
] | null | null | null |
client_sdk_python/providers/eth_tester/middleware.py
|
awake006/client-sdk-python
|
34393e417f74a65f04b643b26d8f9d2ccb0c886e
|
[
"MIT"
] | null | null | null |
client_sdk_python/providers/eth_tester/middleware.py
|
awake006/client-sdk-python
|
34393e417f74a65f04b643b26d8f9d2ccb0c886e
|
[
"MIT"
] | null | null | null |
import operator
from eth_utils import (
is_dict,
is_hex,
is_string,
)
from client_sdk_python.middleware import (
construct_fixture_middleware,
construct_formatting_middleware,
)
from client_sdk_python.utils.formatters import (
apply_formatter_if,
apply_formatter_to_array,
apply_formatters_to_args,
apply_formatters_to_dict,
apply_key_map,
hex_to_integer,
integer_to_hex,
is_array_of_dicts,
remove_key_if,
static_return,
)
from client_sdk_python.utils.toolz import (
assoc,
complement,
compose,
curry,
identity,
partial,
pipe,
)
def is_named_block(value):
return value in {"latest", "earliest", "pending"}
def is_hexstr(value):
return is_string(value) and is_hex(value)
to_integer_if_hex = apply_formatter_if(is_hexstr, hex_to_integer)
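# e.g. to_integer_if_hex('0x10') -> 16, while non-hex values such as 'latest' pass through unchanged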
is_not_named_block = complement(is_named_block)
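# eth-tester uses snake_case keys internally; the mapping tables below remap its results to the camelCase
# names expected over JSON-RPC, and remap request params in the opposite direction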
TRANSACTION_KEY_MAPPINGS = {
'block_hash': 'blockHash',
'block_number': 'blockNumber',
'gas_price': 'gasPrice',
'transaction_hash': 'transactionHash',
'transaction_index': 'transactionIndex',
}
transaction_key_remapper = apply_key_map(TRANSACTION_KEY_MAPPINGS)
LOG_KEY_MAPPINGS = {
'log_index': 'logIndex',
'transaction_index': 'transactionIndex',
'transaction_hash': 'transactionHash',
'block_hash': 'blockHash',
'block_number': 'blockNumber',
}
log_key_remapper = apply_key_map(LOG_KEY_MAPPINGS)
RECEIPT_KEY_MAPPINGS = {
'block_hash': 'blockHash',
'block_number': 'blockNumber',
'contract_address': 'contractAddress',
'gas_used': 'gasUsed',
'cumulative_gas_used': 'cumulativeGasUsed',
'transaction_hash': 'transactionHash',
'transaction_index': 'transactionIndex',
}
receipt_key_remapper = apply_key_map(RECEIPT_KEY_MAPPINGS)
BLOCK_KEY_MAPPINGS = {
'gas_limit': 'gasLimit',
'sha3_uncles': 'sha3Uncles',
'transactions_root': 'transactionsRoot',
'parent_hash': 'parentHash',
'bloom': 'logsBloom',
'state_root': 'stateRoot',
'receipt_root': 'receiptsRoot',
'total_difficulty': 'totalDifficulty',
'extra_data': 'extraData',
'gas_used': 'gasUsed',
}
block_key_remapper = apply_key_map(BLOCK_KEY_MAPPINGS)
TRANSACTION_PARAMS_MAPPING = {
'gasPrice': 'gas_price',
}
transaction_params_remapper = apply_key_map(TRANSACTION_PARAMS_MAPPING)
TRANSACTION_PARAMS_FORMATTERS = {
'gas': to_integer_if_hex,
'gasPrice': to_integer_if_hex,
'value': to_integer_if_hex,
# 'nonce': to_integer_if_hex,
}
transaction_params_formatter = compose(
# remove nonce for now due to issue https://github.com/ethereum/eth-tester/issues/80
remove_key_if('nonce', lambda _: True),
apply_formatters_to_dict(TRANSACTION_PARAMS_FORMATTERS),
)
FILTER_PARAMS_MAPPINGS = {
'fromBlock': 'from_block',
'toBlock': 'to_block',
}
filter_params_remapper = apply_key_map(FILTER_PARAMS_MAPPINGS)
FILTER_PARAMS_FORMATTERS = {
'fromBlock': to_integer_if_hex,
'toBlock': to_integer_if_hex,
}
filter_params_formatter = apply_formatters_to_dict(FILTER_PARAMS_FORMATTERS)
filter_params_transformer = compose(filter_params_remapper, filter_params_formatter)
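# e.g. (illustrative values) {'fromBlock': '0x1', 'toBlock': 'latest'} becomes
# {'from_block': 1, 'to_block': 'latest'} after filter_params_transformer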
TRANSACTION_FORMATTERS = {
'to': apply_formatter_if(partial(operator.eq, ''), static_return(None)),
}
transaction_formatter = apply_formatters_to_dict(TRANSACTION_FORMATTERS)
RECEIPT_FORMATTERS = {
'logs': apply_formatter_to_array(log_key_remapper),
}
receipt_formatter = apply_formatters_to_dict(RECEIPT_FORMATTERS)
transaction_params_transformer = compose(transaction_params_remapper, transaction_params_formatter)
ethereum_tester_middleware = construct_formatting_middleware(
request_formatters={
# Eth
'eth_getBlockByNumber': apply_formatters_to_args(
apply_formatter_if(is_not_named_block, to_integer_if_hex),
),
'eth_getFilterChanges': apply_formatters_to_args(hex_to_integer),
'eth_getFilterLogs': apply_formatters_to_args(hex_to_integer),
'eth_getBlockTransactionCountByNumber': apply_formatters_to_args(
apply_formatter_if(is_not_named_block, to_integer_if_hex),
),
'eth_getUncleCountByBlockNumber': apply_formatters_to_args(
apply_formatter_if(is_not_named_block, to_integer_if_hex),
),
'eth_getTransactionByBlockHashAndIndex': apply_formatters_to_args(
identity,
to_integer_if_hex,
),
'eth_getTransactionByBlockNumberAndIndex': apply_formatters_to_args(
apply_formatter_if(is_not_named_block, to_integer_if_hex),
to_integer_if_hex,
),
'eth_getUncleByBlockNumberAndIndex': apply_formatters_to_args(
apply_formatter_if(is_not_named_block, to_integer_if_hex),
to_integer_if_hex,
),
'eth_newFilter': apply_formatters_to_args(
filter_params_transformer,
),
'eth_getLogs': apply_formatters_to_args(
filter_params_transformer,
),
'eth_sendTransaction': apply_formatters_to_args(
transaction_params_transformer,
),
'eth_estimateGas': apply_formatters_to_args(
transaction_params_transformer,
),
'eth_call': apply_formatters_to_args(
transaction_params_transformer,
apply_formatter_if(is_not_named_block, to_integer_if_hex),
),
'eth_uninstallFilter': apply_formatters_to_args(hex_to_integer),
'eth_getCode': apply_formatters_to_args(
identity,
apply_formatter_if(is_not_named_block, to_integer_if_hex),
),
# EVM
'evm_revert': apply_formatters_to_args(hex_to_integer),
# Personal
'personal_sendTransaction': apply_formatters_to_args(
transaction_params_transformer,
identity,
),
},
result_formatters={
'eth_getBlockByHash': apply_formatter_if(
is_dict,
block_key_remapper,
),
'eth_getBlockByNumber': apply_formatter_if(
is_dict,
block_key_remapper,
),
'eth_getBlockTransactionCountByHash': apply_formatter_if(
is_dict,
transaction_key_remapper,
),
'eth_getBlockTransactionCountByNumber': apply_formatter_if(
is_dict,
transaction_key_remapper,
),
'eth_getTransactionByHash': apply_formatter_if(
is_dict,
compose(transaction_key_remapper, transaction_formatter),
),
'eth_getTransactionReceipt': apply_formatter_if(
is_dict,
compose(receipt_key_remapper, receipt_formatter),
),
'eth_newFilter': integer_to_hex,
'eth_newBlockFilter': integer_to_hex,
'eth_newPendingTransactionFilter': integer_to_hex,
'eth_getLogs': apply_formatter_if(
is_array_of_dicts,
apply_formatter_to_array(log_key_remapper),
),
'eth_getFilterChanges': apply_formatter_if(
is_array_of_dicts,
apply_formatter_to_array(log_key_remapper),
),
'eth_getFilterLogs': apply_formatter_if(
is_array_of_dicts,
apply_formatter_to_array(log_key_remapper),
),
# EVM
'evm_snapshot': integer_to_hex,
},
)
ethereum_tester_fixture_middleware = construct_fixture_middleware({
# Eth
'eth_protocolVersion': '63',
'eth_hashrate': 0,
'eth_gasPrice': 1,
'eth_syncing': False,
'eth_mining': False,
# Net
'net_version': '1',
'net_listening': False,
'net_peerCount': 0,
})
def guess_from(web3, transaction):
coinbase = web3.eth.coinbase
if coinbase is not None:
return coinbase
try:
return web3.eth.accounts[0]
except (IndexError, KeyError):
# no accounts available to pre-fill, carry on
pass
return None
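# Double the node's estimate; presumably a safety margin so the guessed gas is not too tight
# (see the TODO below about removing guess_gas entirely).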
def guess_gas(web3, transaction):
return web3.eth.estimateGas(transaction) * 2
@curry
def fill_default(field, guess_func, web3, transaction):
if field in transaction and transaction[field] is not None:
return transaction
else:
guess_val = guess_func(web3, transaction)
return assoc(transaction, field, guess_val)
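# Middleware that fills in a missing 'from' (and, for eth_call, also 'gas') on outgoing transactions
# using the guesses above.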
def default_transaction_fields_middleware(make_request, web3):
fill_default_from = fill_default('from', guess_from, web3)
fill_default_gas = fill_default('gas', guess_gas, web3)
def middleware(method, params):
# TODO send call to eth-tester without gas, and remove guess_gas entirely
if method == 'eth_call':
filled_transaction = pipe(
params[0],
fill_default_from,
fill_default_gas,
)
return make_request(method, [filled_transaction] + params[1:])
elif method in (
'eth_estimateGas',
'eth_sendTransaction',
):
filled_transaction = pipe(
params[0],
fill_default_from,
)
return make_request(method, [filled_transaction] + params[1:])
else:
return make_request(method, params)
return middleware
| 28.155488
| 99
| 0.682079
|
b543ed1bb9d58647000b7519ed6039bb76415ad2
| 48,824
|
py
|
Python
|
tests/syft/lib/python/dict/dict_test.py
|
godormad/PySyft
|
fcb3374b6318dcccf377175fb8db6f70e9e1d1e3
|
[
"Apache-2.0"
] | null | null | null |
tests/syft/lib/python/dict/dict_test.py
|
godormad/PySyft
|
fcb3374b6318dcccf377175fb8db6f70e9e1d1e3
|
[
"Apache-2.0"
] | null | null | null |
tests/syft/lib/python/dict/dict_test.py
|
godormad/PySyft
|
fcb3374b6318dcccf377175fb8db6f70e9e1d1e3
|
[
"Apache-2.0"
] | null | null | null |
"""
Tests copied from cpython test suite:
https://github.com/python/cpython/blob/3.8/Lib/test/test_dict.py
"""
# stdlib
import collections
import collections.abc
import gc
import pickle
import random
import string
import sys
from test import support
import unittest
# third party
import pytest
# syft absolute
from syft.lib.python.dict import Dict
from syft.lib.python.none import SyNone
from syft.lib.python.string import String
# import weakref
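# Where syft's Dict (a UserDict subclass) intentionally diverges from the built-in dict, the assertions
# below are adjusted, skipped, or marked TODO.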
class DictTest(unittest.TestCase):
def test_invalid_keyword_arguments(self):
class Custom(dict):
pass
for invalid in {1: 2}, Custom({1: 2}):
with self.assertRaises(TypeError):
dict(**invalid)
with self.assertRaises(TypeError):
{}.update(**invalid)
def test_constructor(self):
# calling built-in types without argument must return empty
self.assertEqual(Dict(), {})
self.assertIsNot(Dict(), {})
@pytest.mark.slow
def test_literal_constructor(self):
# check literal constructor for different sized dicts
# (to exercise the BUILD_MAP oparg).
for n in (0, 1, 6, 256, 400):
items = [
("".join(random.sample(string.ascii_letters, 8)), i) for i in range(n)
]
random.shuffle(items)
formatted_items = (f"{k!r}: {v:d}" for k, v in items)
dictliteral = "{" + ", ".join(formatted_items) + "}"
self.assertEqual(eval(dictliteral), Dict(items))
def test_bool(self):
self.assertIs(not {}, True)
self.assertTrue(Dict({1: 2}))
self.assertIs(bool(Dict({})), False)
self.assertIs(bool(Dict({1: 2})), True)
def test_keys(self):
d = Dict()
self.assertEqual(set(d.keys()), set())
d = {"a": 1, "b": 2}
k = d.keys()
self.assertEqual(set(k), {"a", "b"})
self.assertIn("a", k)
self.assertIn("b", k)
self.assertIn("a", d)
self.assertIn("b", d)
self.assertRaises(TypeError, d.keys, None)
self.assertEqual(repr(dict(a=1).keys()), "dict_keys(['a'])")
def test_values(self):
d = Dict()
self.assertEqual(set(d.values()), set())
d = Dict({1: 2})
self.assertEqual(set(d.values()), {2})
self.assertRaises(TypeError, d.values, None)
self.assertEqual(repr(dict(a=1).values()), "dict_values([1])")
def test_items(self):
# TODO: support this when we have sets:
pass
# d = Dict()
# self.assertEqual(set(d.items()), set())
#
# d = Dict({1: 2})
# self.assertEqual(set(d.items()), {(1, 2)})
# self.assertRaises(TypeError, d.items, None)
# self.assertEqual(repr(dict(a=1).items()), "dict_items([('a', 1)])")
def test_contains(self):
d = Dict()
self.assertNotIn("a", d)
self.assertFalse("a" in d)
self.assertTrue("a" not in d)
d = Dict({"a": 1, "b": 2})
self.assertIn("a", d)
self.assertIn("b", d)
self.assertNotIn("c", d)
self.assertRaises(TypeError, d.__contains__)
def test_len(self):
d = Dict()
self.assertEqual(len(d), 0)
d = Dict({"a": 1, "b": 2})
self.assertEqual(len(d), 2)
def test_getitem(self):
d = Dict({"a": 1, "b": 2})
self.assertEqual(d["a"], 1)
self.assertEqual(d["b"], 2)
d["c"] = 3
d["a"] = 4
self.assertEqual(d["c"], 3)
self.assertEqual(d["a"], 4)
del d["b"]
self.assertEqual(d, {"a": 4, "c": 3})
self.assertRaises(TypeError, d.__getitem__)
class BadEq(object):
def __eq__(self, other):
raise Exc()
def __hash__(self):
return 24
d = Dict()
d[BadEq()] = 42
self.assertRaises(KeyError, d.__getitem__, 23)
class Exc(Exception):
pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.__getitem__, x)
def test_clear(self):
d = Dict({1: 1, 2: 2, 3: 3})
d.clear()
self.assertEqual(d, {})
self.assertRaises(TypeError, d.clear, None)
def test_update(self):
d = Dict()
d.update({1: 100})
d.update({2: 20})
d.update({1: 1, 2: 2, 3: 3})
self.assertEqual(d, {1: 1, 2: 2, 3: 3})
d.update()
self.assertEqual(d, {1: 1, 2: 2, 3: 3})
self.assertRaises((TypeError, AttributeError), d.update, None)
class SimpleUserDict:
def __init__(self):
self.d = {1: 1, 2: 2, 3: 3}
def keys(self):
return self.d.keys()
def __getitem__(self, i):
return self.d[i]
d.clear()
d.update(SimpleUserDict())
self.assertEqual(d, {1: 1, 2: 2, 3: 3})
class Exc(Exception):
pass
d.clear()
class FailingUserDict:
def keys(self):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = 1
def __iter__(self):
return self
def __next__(self):
if self.i:
self.i = 0
return "a"
raise Exc
return BogonIter()
def __getitem__(self, key):
return key
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = ord("a")
def __iter__(self):
return self
def __next__(self):
if self.i <= ord("z"):
rtn = chr(self.i)
self.i += 1
return rtn
raise StopIteration
return BogonIter()
def __getitem__(self, key):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
class badseq(object):
def __iter__(self):
return self
def __next__(self):
raise Exc()
self.assertRaises(Exc, {}.update, badseq())
self.assertRaises(ValueError, {}.update, [(1, 2, 3)])
def test_fromkeys(self):
self.assertEqual(dict.fromkeys("abc"), {"a": None, "b": None, "c": None})
d = Dict()
self.assertIsNot(d.fromkeys("abc"), d)
self.assertEqual(d.fromkeys("abc"), {"a": None, "b": None, "c": None})
self.assertEqual(d.fromkeys((4, 5), 0), {4: 0, 5: 0})
self.assertEqual(d.fromkeys([]), {})
def g():
yield 1
self.assertEqual(d.fromkeys(g()), {1: None})
self.assertRaises(TypeError, {}.fromkeys, 3)
class dictlike(dict):
pass
self.assertEqual(dictlike.fromkeys("a"), {"a": None})
self.assertEqual(dictlike().fromkeys("a"), {"a": None})
self.assertIsInstance(dictlike.fromkeys("a"), dictlike)
self.assertIsInstance(dictlike().fromkeys("a"), dictlike)
class mydict(dict):
def __new__(cls):
return Dict()
ud = mydict.fromkeys("ab")
self.assertEqual(ud, {"a": None, "b": None})
self.assertIsInstance(ud, Dict)
self.assertRaises(TypeError, dict.fromkeys)
class Exc(Exception):
pass
class baddict1(dict):
def __init__(self):
raise Exc()
self.assertRaises(Exc, baddict1.fromkeys, [1])
class BadSeq(object):
def __iter__(self):
return self
def __next__(self):
raise Exc()
self.assertRaises(Exc, dict.fromkeys, BadSeq())
class baddict2(dict):
def __setitem__(self, key, value):
raise Exc()
self.assertRaises(Exc, baddict2.fromkeys, [1])
# test fast path for dictionary inputs
d = dict(zip(range(6), range(6)))
self.assertEqual(dict.fromkeys(d, 0), dict(zip(range(6), [0] * 6)))
class baddict3(dict):
def __new__(cls):
return d
d = {i: i for i in range(10)}
res = d.copy()
res.update(a=None, b=None, c=None)
self.assertEqual(baddict3.fromkeys({"a", "b", "c"}), res)
def test_copy(self):
d = Dict({1: 1, 2: 2, 3: 3})
self.assertIsNot(d.copy(), d)
self.assertEqual(d.copy(), d)
self.assertEqual(d.copy(), {1: 1, 2: 2, 3: 3})
copy = d.copy()
d[4] = 4
self.assertNotEqual(copy, d)
self.assertEqual({}.copy(), {})
self.assertRaises(TypeError, d.copy, None)
@pytest.mark.slow
def test_copy_fuzz(self):
for dict_size in [10, 100, 1000]: # TODO: 10000, 100000
dict_size = random.randrange(dict_size // 2, dict_size + dict_size // 2)
with self.subTest(dict_size=dict_size):
d = Dict()
for i in range(dict_size):
d[i] = i
d2 = d.copy()
self.assertIsNot(d2, d)
self.assertEqual(d, d2)
d2["key"] = "value"
self.assertNotEqual(d, d2)
self.assertEqual(len(d2), len(d) + 1)
def test_copy_maintains_tracking(self):
class A:
pass
key = A()
for d in (Dict(), Dict({"a": 1}), Dict({key: "val"})):
d2 = d.copy()
self.assertEqual(gc.is_tracked(d), gc.is_tracked(d2))
def test_copy_noncompact(self):
# Dicts don't compact themselves on del/pop operations.
# Copy will use a slow merging strategy that produces
# a compacted copy when roughly 33% of dict is a non-used
# keys-space (to optimize memory footprint).
# In this test we want to hit the slow/compacting
# branch of dict.copy() and make sure it works OK.
d = Dict({k: k for k in range(1000)})
for k in range(950):
del d[k]
d2 = d.copy()
self.assertEqual(d2, d)
def test_get(self):
d = Dict()
self.assertIs(d.get("c"), SyNone)
self.assertEqual(d.get("c", 3), 3)
d = Dict({"a": 1, "b": 2})
self.assertEqual(d.get("c"), SyNone)
self.assertEqual(d.get("c", 3), 3)
self.assertEqual(d.get("a"), 1)
self.assertEqual(d.get("a", 3), 1)
self.assertRaises(TypeError, d.get)
self.assertRaises(TypeError, d.get, None, None, None)
def test_setdefault(self):
# dict.setdefault()
d = Dict()
self.assertIs(d.setdefault("key0"), SyNone)
d.setdefault("key0", [])
self.assertIs(d.setdefault("key0"), SyNone)
d.setdefault("key", []).append(3)
self.assertEqual(d["key"][0], 3)
d.setdefault("key", []).append(4)
self.assertEqual(len(d["key"]), 2)
self.assertRaises(TypeError, d.setdefault)
class Exc(Exception):
pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.setdefault, x, [])
def test_setdefault_atomic(self):
# Issue #13521: setdefault() calls __hash__ and __eq__ only once.
class Hashed(object):
def __init__(self):
self.hash_count = 0
self.eq_count = 0
def __hash__(self):
self.hash_count += 1
return 42
def __eq__(self, other):
self.eq_count += 1
return id(self) == id(other)
hashed1 = Hashed()
y = {hashed1: 5}
hashed2 = Hashed()
y.setdefault(hashed2, [])
self.assertEqual(hashed1.hash_count, 1)
self.assertEqual(hashed2.hash_count, 1)
self.assertEqual(hashed1.eq_count + hashed2.eq_count, 1)
def test_setitem_atomic_at_resize(self):
class Hashed(object):
def __init__(self):
self.hash_count = 0
self.eq_count = 0
def __hash__(self):
self.hash_count += 1
return 42
def __eq__(self, other):
self.eq_count += 1
return id(self) == id(other)
hashed1 = Hashed()
# 5 items
y = Dict({hashed1: 5, 0: 0, 1: 1, 2: 2, 3: 3})
hashed2 = Hashed()
# 6th item forces a resize
y[hashed2] = []
# plain dict hashes the key only once here, but UserDict hashes it 3 times;
# since Dict subclasses UserDict, matching UserDict's count is the correct expectation
# self.assertEqual(hashed1.hash_count, 1)
self.assertEqual(hashed1.hash_count, 3)
self.assertEqual(hashed2.hash_count, 1)
self.assertEqual(hashed1.eq_count + hashed2.eq_count, 1)
@pytest.mark.slow
def test_popitem(self):
# dict.popitem()
for copymode in -1, +1:
# -1: b has same structure as a
# +1: b is a.copy()
for log2size in range(10):
size = 2 ** log2size
a = Dict()
b = Dict()
for i in range(size):
a[repr(i)] = i
if copymode < 0:
b[repr(i)] = i
if copymode > 0:
b = a.copy()
for i in range(size):
ka, va = ta = a.popitem()
self.assertEqual(va, ka.__int__())
kb, vb = tb = b.popitem()
self.assertEqual(vb, kb.__int__())
self.assertFalse(copymode < 0 and ta != tb)
self.assertFalse(a)
self.assertFalse(b)
d = {}
self.assertRaises(KeyError, d.popitem)
def test_pop(self):
# Tests for pop with specified key
d = Dict()
k, v = "abc", "def"
d[k] = v
self.assertRaises(KeyError, d.pop, "ghi")
self.assertEqual(d.pop(k), v)
self.assertEqual(len(d), 0)
self.assertRaises(KeyError, d.pop, k)
self.assertEqual(d.pop(k, v), v)
d[k] = v
self.assertEqual(d.pop(k, 1), v)
self.assertRaises(TypeError, d.pop)
class Exc(Exception):
pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.pop, x)
def test_mutating_iteration(self):
# changing dict size during iteration
d = Dict()
d[1] = 1
with self.assertRaises(RuntimeError):
for i in d:
d[i + 1] = 1
def test_mutating_iteration_delete(self):
# change dict content during iteration
d = Dict()
d[0] = 0
# python 3.8+ raises RuntimeError but older versions do not
if sys.version_info >= (3, 8):
with self.assertRaises(RuntimeError):
for i in d:
del d[0]
d[0] = 0
def test_mutating_iteration_delete_over_values(self):
# change dict content during iteration
d = Dict()
d[0] = 0
# python 3.8+ raises RuntimeError but older versions do not
if sys.version_info >= (3, 8):
with self.assertRaises(RuntimeError):
for i in d.values():
del d[0]
d[0] = 0
def test_mutating_iteration_delete_over_items(self):
# TODO: proper iterators needed over the views, currently, we convert them to lists
pass
# # change dict content during iteration
# d = Dict()
# d[0] = 0
# # python 3.8+ raise RuntimeError but older versions do not
# if sys.version_info >= (3, 8):
# with self.assertRaises(RuntimeError):
# for i in d.items():
# del d[0]
# d[0] = 0
def test_mutating_lookup(self):
# changing dict during a lookup (issue #14417)
# TODO: investigate this at some point
pass
# class NastyKey:
# mutate_dict = None
#
# def __init__(self, value):
# self.value = value
#
# def __hash__(self):
# # hash collision!
# return 1
#
# def __eq__(self, other):
# if NastyKey.mutate_dict:
# mydict, key = NastyKey.mutate_dict
# NastyKey.mutate_dict = None
# del mydict[key]
# return self.value == other.value
#
# key1 = NastyKey(1)
# key2 = NastyKey(2)
# d = Dict({key1: 1})
# NastyKey.mutate_dict = (d, key1)
# d[key2] = 2
# self.assertEqual(d, {key2: 2})
def test_repr(self):
d = Dict()
self.assertEqual(repr(d), "{}")
d[1] = 2
self.assertEqual(repr(d), "{1: 2}")
d = Dict()
d[1] = d
self.assertEqual(repr(d), "{1: {...}}")
class Exc(Exception):
pass
class BadRepr(object):
def __repr__(self):
raise Exc()
d = Dict({1: BadRepr()})
self.assertRaises(Exc, repr, d)
def test_repr_deep(self):
d = Dict()
for i in range(sys.getrecursionlimit() + 100):
d = Dict({1: d})
self.assertRaises(RecursionError, repr, d)
def test_eq(self):
self.assertEqual(Dict(), {})
self.assertEqual(Dict({1: 2}), {1: 2})
# TODO, when we have full set and iter support, make this pass as well
pass
# class Exc(Exception):
# pass
#
# class BadCmp(object):
# def __eq__(self, other):
# raise Exc()
#
# def __hash__(self):
# return 1
#
# d1 = Dict({BadCmp(): 1})
# d2 = Dict({1: 1})
#
# with self.assertRaises(Exc):
# d1 == d2
def test_keys_contained(self):
self.helper_keys_contained(lambda x: x.keys())
self.helper_keys_contained(lambda x: x.items())
def helper_keys_contained(self, fn):
# TODO add this when we have set support
pass
# Test rich comparisons against dict key views, which should behave the
# same as sets.
# empty = fn(Dict())
# empty2 = fn(Dict())
# smaller = fn(Dict({1: 1, 2: 2}))
# larger = fn(Dict({1: 1, 2: 2, 3: 3}))
# larger2 = fn(Dict({1: 1, 2: 2, 3: 3}))
# larger3 = fn(Dict({4: 1, 2: 2, 3: 3}))
#
# self.assertTrue(smaller < larger)
# self.assertTrue(smaller <= larger)
# self.assertTrue(larger > smaller)
# self.assertTrue(larger >= smaller)
#
# self.assertFalse(smaller >= larger)
# self.assertFalse(smaller > larger)
# self.assertFalse(larger <= smaller)
# self.assertFalse(larger < smaller)
#
# self.assertFalse(smaller < larger3)
# self.assertFalse(smaller <= larger3)
# self.assertFalse(larger3 > smaller)
# self.assertFalse(larger3 >= smaller)
#
# # Inequality strictness
# self.assertTrue(larger2 >= larger)
# self.assertTrue(larger2 <= larger)
# self.assertFalse(larger2 > larger)
# self.assertFalse(larger2 < larger)
#
# self.assertTrue(larger == larger2)
# self.assertTrue(smaller != larger)
#
# # There is an optimization on the zero-element case.
# self.assertTrue(empty == empty2)
# self.assertFalse(empty != empty2)
# self.assertFalse(empty == smaller)
# self.assertTrue(empty != smaller)
#
# # With the same size, an elementwise compare happens
# self.assertTrue(larger != larger3)
# self.assertFalse(larger == larger3)
def test_errors_in_view_containment_check(self):
# TODO: add support for custom objects
# class C:
# def __eq__(self, other):
# raise RuntimeError
#
# d1 = Dict({1: C()})
# d2 = Dict({1: C()})
# with self.assertRaises(RuntimeError):
# d1.items() == d2.items()
# with self.assertRaises(RuntimeError):
# d1.items() != d2.items()
# with self.assertRaises(RuntimeError):
# d1.items() <= d2.items()
# with self.assertRaises(RuntimeError):
# d1.items() >= d2.items()
#
# d3 = Dict({1: C(), 2: C()})
# with self.assertRaises(RuntimeError):
# d2.items() < d3.items()
# with self.assertRaises(RuntimeError):
# d3.items() > d2.items()
pass
def test_dictview_set_operations_on_keys(self):
# TODO add support for sets
pass
# k1 = Dict({1: 1, 2: 2}).keys()
# k2 = Dict({1: 1, 2: 2, 3: 3}).keys()
# k3 = Dict({4: 4}).keys()
#
# self.assertEqual(k1 - k2, set())
# self.assertEqual(k1 - k3, {1, 2})
# self.assertEqual(k2 - k1, {3})
# self.assertEqual(k3 - k1, {4})
# self.assertEqual(k1 & k2, {1, 2})
# self.assertEqual(k1 & k3, set())
# self.assertEqual(k1 | k2, {1, 2, 3})
# self.assertEqual(k1 ^ k2, {3})
# self.assertEqual(k1 ^ k3, {1, 2, 4})
def test_dictview_set_operations_on_items(self):
# TODO add support for sets
pass
# k1 = Dict({1: 1, 2: 2}).items()
# k2 = Dict({1: 1, 2: 2, 3: 3}).items()
# k3 = Dict({4: 4}).items()
#
# self.assertEqual(k1 - k2, set())
# self.assertEqual(k1 - k3, {(1, 1), (2, 2)})
# self.assertEqual(k2 - k1, {(3, 3)})
# self.assertEqual(k3 - k1, {(4, 4)})
# self.assertEqual(k1 & k2, {(1, 1), (2, 2)})
# self.assertEqual(k1 & k3, set())
# self.assertEqual(k1 | k2, {(1, 1), (2, 2), (3, 3)})
# self.assertEqual(k1 ^ k2, {(3, 3)})
# self.assertEqual(k1 ^ k3, {(1, 1), (2, 2), (4, 4)})
def test_dictview_mixed_set_operations(self):
# TODO add support for sets
pass
# Just a few for .keys()
# self.assertTrue(Dict({1: 1}).keys() == {1})
# self.assertEqual(Dict({1: 1}).keys() | {2}, {1, 2})
# # And a few for .items()
# self.assertTrue(Dict({1: 1}).items() == {(1, 1)})
#
# # This test has been changed to reflect the behavior of UserDict
# self.assertTrue(Dict({(1, 1)}) == {1: 1})
#
# # UserDict does not support init with set items like:
# # UserDict({2}) so neither do we with Dict
# with pytest.raises(TypeError):
# self.assertEqual(Dict({2}) | Dict({1: 1}).keys(), {1, 2})
# self.assertTrue(Dict({1}) == {1: 1}.keys())
# self.assertEqual(Dict({2}) | Dict({1: 1}).items(), {(1, 1), 2})
# self.assertEqual(Dict({1: 1}).items() | Dict({2}), {(1, 1), 2})
def test_missing(self):
# Make sure dict doesn't have a __missing__ method
self.assertFalse(hasattr(Dict, "__missing__"))
self.assertFalse(hasattr(Dict(), "__missing__"))
# Test several cases:
# (D) subclass defines __missing__ method returning a value
# (E) subclass defines __missing__ method raising RuntimeError
# (F) subclass sets __missing__ instance variable (no effect)
# (G) subclass doesn't define __missing__ at all
class D(Dict):
def __missing__(self, key):
return 42
d = D({1: 2, 3: 4})
self.assertEqual(d[1], 2)
self.assertEqual(d[3], 4)
self.assertNotIn(2, d)
self.assertNotIn(2, d.keys())
self.assertEqual(d[2], 42)
class E(dict):
def __missing__(self, key):
raise RuntimeError(key)
e = E()
with self.assertRaises(RuntimeError) as c:
e[42]
self.assertEqual(c.exception.args, (42,))
class F(dict):
def __init__(self):
# An instance variable __missing__ should have no effect
self.__missing__ = lambda key: None
f = F()
with self.assertRaises(KeyError) as c:
f[42]
self.assertEqual(c.exception.args, (42,))
class G(dict):
pass
g = G()
with self.assertRaises(KeyError) as c:
g[42]
self.assertEqual(c.exception.args, (42,))
def test_tuple_keyerror(self):
# SF #1576657
d = Dict()
with self.assertRaises(KeyError) as c:
d[(1,)]
self.assertEqual(c.exception.args, ((1,),))
def test_bad_key(self):
# Dictionary lookups should fail if __eq__() raises an exception.
class CustomException(Exception):
pass
class BadDictKey:
def __hash__(self):
return hash(self.__class__)
def __eq__(self, other):
if isinstance(other, self.__class__):
raise CustomException
return other
d = Dict()
x1 = BadDictKey()
x2 = BadDictKey()
d[x1] = 1
for stmt in [
"d[x2] = 2",
"z = d[x2]",
"x2 in d",
"d.get(x2)",
"d.setdefault(x2, 42)",
"d.pop(x2)",
"d.update({x2: 2})",
]:
with self.assertRaises(CustomException):
exec(stmt, locals())
def test_resize1(self):
# Dict resizing bug, found by Jack Jansen in 2.2 CVS development.
# This version got an assert failure in debug build, infinite loop in
# release build. Unfortunately, provoking this kind of stuff requires
# a mix of inserts and deletes hitting exactly the right hash codes in
# exactly the right order, and I can't think of a randomized approach
# that would be *likely* to hit a failing case in reasonable time.
d = Dict()
for i in range(5):
d[i] = i
for i in range(5):
del d[i]
for i in range(5, 9): # i==8 was the problem
d[i] = i
def test_resize2(self):
# Another dict resizing bug (SF bug #1456209).
# This caused Segmentation faults or Illegal instructions.
class X(object):
def __hash__(self):
return 5
def __eq__(self, other):
if resizing:
d.clear()
return False
d = Dict()
resizing = False
d[X()] = 1
d[X()] = 2
d[X()] = 3
d[X()] = 4
d[X()] = 5
# now trigger a resize
resizing = True
d[9] = 6
def test_empty_presized_dict_in_freelist(self):
# Bug #3537: if an empty but presized dict with a size larger
# than 7 was in the freelist, it triggered an assertion failure
with self.assertRaises(ZeroDivisionError):
d = Dict(
{
"a": 1 // 0,
"b": None,
"c": None,
"d": None,
"e": None,
"f": None,
"g": None,
"h": None,
}
)
d.clear()
@pytest.mark.slow
def test_container_iterator(self):
# TODO: make this pass
pass
# # Bug #3680: tp_traverse was not implemented for dictiter and
# # dictview objects.
# class C(object):
# pass
#
# views = (Dict.items, Dict.values, Dict.keys)
# for v in views:
# obj = C()
# ref = weakref.ref(obj)
# container = {obj: 1}
# obj.v = v(container)
# obj.x = iter(obj.v)
# del obj, container
# gc.collect()
# self.assertIs(ref(), None, "Cycle was not collected")
def _not_tracked(self, t):
# Nested containers can take several collections to untrack
gc.collect()
gc.collect()
# UserDict is tracked unlike normal dict so we have to change
# this test for our Dict
# self.assertFalse(gc.is_tracked(t), t)
self.assertTrue(gc.is_tracked(t), t)
def _tracked(self, t):
self.assertTrue(gc.is_tracked(t), t)
gc.collect()
gc.collect()
self.assertTrue(gc.is_tracked(t), t)
@pytest.mark.slow
@support.cpython_only
def test_track_literals(self):
# Test GC-optimization of dict literals
x, y, z = 1.5, "a", (1, None)
self._not_tracked(Dict())
self._not_tracked(Dict({x: (), y: x, z: 1}))
self._not_tracked(Dict({1: "a", "b": 2}))
self._not_tracked(Dict({1: 2, (None, True, False, ()): int}))
self._not_tracked(Dict({1: object()}))
# Dicts with mutable elements are always tracked, even if those
# elements are not tracked right now.
self._tracked(Dict({1: []}))
self._tracked(Dict({1: ([],)}))
self._tracked(Dict({1: {}}))
self._tracked(Dict({1: set()}))
@pytest.mark.slow
@support.cpython_only
def test_track_dynamic(self):
# Test GC-optimization of dynamically-created dicts
class MyObject(object):
pass
x, y, z, w, o = 1.5, "a", (1, object()), [], MyObject()
d = Dict()
self._not_tracked(d)
d[1] = "a"
self._not_tracked(d)
d[y] = 2
self._not_tracked(d)
d[z] = 3
self._not_tracked(d)
self._not_tracked(d.copy())
d[4] = w
self._tracked(d)
self._tracked(d.copy())
d[4] = None
self._not_tracked(d)
self._not_tracked(d.copy())
# dd isn't tracked right now, but it may mutate and therefore d
# which contains it must be tracked.
d = Dict()
dd = Dict()
d[1] = dd
self._not_tracked(dd)
self._tracked(d)
dd[1] = d
self._tracked(dd)
d = Dict.fromkeys([x, y, z])
self._not_tracked(d)
dd = Dict()
dd.update(d)
self._not_tracked(dd)
d = Dict.fromkeys([x, y, z, o])
self._tracked(d)
dd = Dict()
dd.update(d)
self._tracked(dd)
d = Dict(x=x, y=y, z=z)
self._not_tracked(d)
d = Dict(x=x, y=y, z=z, w=w)
self._tracked(d)
d = Dict()
d.update(x=x, y=y, z=z)
self._not_tracked(d)
d.update(w=w)
self._tracked(d)
d = Dict([(x, y), (z, 1)])
self._not_tracked(d)
d = Dict([(x, y), (z, w)])
self._tracked(d)
d = Dict()
d.update([(x, y), (z, 1)])
self._not_tracked(d)
d.update([(x, y), (z, w)])
self._tracked(d)
@support.cpython_only
def test_track_subtypes(self):
# Dict subtypes are always tracked
class MyDict(Dict):
pass
self._tracked(MyDict())
def make_shared_key_dict(self, n):
class C:
pass
dicts = []
for i in range(n):
a = C()
a.x, a.y, a.z = 1, 2, 3
dicts.append(a.__dict__)
return dicts
@support.cpython_only
def test_splittable_setdefault(self):
"""split table must be combined when setdefault()
breaks insertion order"""
a, b = self.make_shared_key_dict(2)
a["a"] = 1
size_a = sys.getsizeof(a)
a["b"] = 2
b.setdefault("b", 2)
size_b = sys.getsizeof(b)
b["a"] = 1
self.assertGreater(size_b, size_a)
self.assertEqual(list(a), ["x", "y", "z", "a", "b"])
self.assertEqual(list(b), ["x", "y", "z", "b", "a"])
@support.cpython_only
def test_splittable_del(self):
"""split table must be combined when del d[k]"""
a, b = self.make_shared_key_dict(2)
orig_size = sys.getsizeof(a)
del a["y"] # split table is combined
with self.assertRaises(KeyError):
del a["y"]
self.assertGreater(sys.getsizeof(a), orig_size)
self.assertEqual(list(a), ["x", "z"])
self.assertEqual(list(b), ["x", "y", "z"])
# Two dicts have different insertion order.
a["y"] = 42
self.assertEqual(list(a), ["x", "z", "y"])
self.assertEqual(list(b), ["x", "y", "z"])
@support.cpython_only
def test_splittable_pop(self):
"""split table must be combined when d.pop(k)"""
a, b = self.make_shared_key_dict(2)
orig_size = sys.getsizeof(a)
a.pop("y") # split table is combined
with self.assertRaises(KeyError):
a.pop("y")
self.assertGreater(sys.getsizeof(a), orig_size)
self.assertEqual(list(a), ["x", "z"])
self.assertEqual(list(b), ["x", "y", "z"])
# Two dicts have different insertion order.
a["y"] = 42
self.assertEqual(list(a), ["x", "z", "y"])
self.assertEqual(list(b), ["x", "y", "z"])
@support.cpython_only
def test_splittable_pop_pending(self):
"""pop a pending key in a splitted table should not crash"""
a, b = self.make_shared_key_dict(2)
a["a"] = 4
with self.assertRaises(KeyError):
b.pop("a")
@support.cpython_only
def test_splittable_popitem(self):
"""split table must be combined when d.popitem()"""
a, b = self.make_shared_key_dict(2)
orig_size = sys.getsizeof(a)
item = a.popitem() # split table is combined
self.assertEqual(item, ("z", 3))
with self.assertRaises(KeyError):
del a["z"]
self.assertGreater(sys.getsizeof(a), orig_size)
self.assertEqual(list(a), ["x", "y"])
self.assertEqual(list(b), ["x", "y", "z"])
@support.cpython_only
def test_splittable_setattr_after_pop(self):
"""setattr() must not convert combined table into split table."""
# Issue 28147
# third party
import _testcapi
class C:
pass
a = C()
a.a = 1
self.assertTrue(_testcapi.dict_hassplittable(a.__dict__))
# dict.pop() convert it to combined table
a.__dict__.pop("a")
self.assertFalse(_testcapi.dict_hassplittable(a.__dict__))
# But C should not convert a.__dict__ to split table again.
a.a = 1
self.assertFalse(_testcapi.dict_hassplittable(a.__dict__))
# Same for popitem()
a = C()
a.a = 2
self.assertTrue(_testcapi.dict_hassplittable(a.__dict__))
a.__dict__.popitem()
self.assertFalse(_testcapi.dict_hassplittable(a.__dict__))
a.a = 3
self.assertFalse(_testcapi.dict_hassplittable(a.__dict__))
@pytest.mark.xfail
def test_iterator_pickling(self):
# set to xfail because we don't really care about pickling
# see test_valuesiterator_pickling
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
data = Dict({1: "a", 2: "b", 3: "c"})
it = iter(data)
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(list(it), list(data))
it = pickle.loads(d)
try:
drop = next(it)
except StopIteration:
continue
d = pickle.dumps(it, proto)
it = pickle.loads(d)
del data[drop]
self.assertEqual(list(it), list(data))
def test_itemiterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# UserDict fails these tests so our Dict should fail as well
with pytest.raises(TypeError):
data = Dict({1: "a", 2: "b", 3: "c"})
# dictviews aren't picklable, only their iterators
itorg = iter(data.items())
d = pickle.dumps(itorg, proto)
it = pickle.loads(d)
# note that the type of the unpickled iterator
# is not necessarily the same as the original. It is
# merely an object supporting the iterator protocol, yielding
# the same objects as the original one.
# self.assertEqual(type(itorg), type(it))
self.assertIsInstance(it, collections.abc.Iterator)
self.assertEqual(Dict(it), data)
it = pickle.loads(d)
drop = next(it)
d = pickle.dumps(it, proto)
it = pickle.loads(d)
del data[drop[0]]
self.assertEqual(Dict(it), data)
def test_valuesiterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# UserDict fails these tests so our Dict should fail as well
with pytest.raises(TypeError):
data = Dict({1: "a", 2: "b", 3: "c"})
# data.values() isn't picklable, only its iterator
it = iter(data.values())
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(list(it), list(data.values()))
it = pickle.loads(d)
drop = next(it)
d = pickle.dumps(it, proto)
it = pickle.loads(d)
values = list(it) + [drop]
self.assertEqual(sorted(values), sorted(list(data.values())))
def test_reverseiterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# UserDict fails these tests so our Dict should fail as well
with pytest.raises(TypeError):
data = Dict({1: "a", 2: "b", 3: "c"})
it = reversed(data)
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(list(it), list(reversed(data)))
it = pickle.loads(d)
try:
drop = next(it)
except StopIteration:
continue
d = pickle.dumps(it, proto)
it = pickle.loads(d)
del data[drop]
self.assertEqual(list(it), list(reversed(data)))
def test_reverseitemiterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# UserDict fails these tests so our Dict should fail as well
with pytest.raises(TypeError):
data = Dict({1: "a", 2: "b", 3: "c"})
# dictviews aren't picklable, only their iterators
itorg = reversed(data.items())
d = pickle.dumps(itorg, proto)
it = pickle.loads(d)
# note that the type of the unpickled iterator
# is not necessarily the same as the original. It is
# merely an object supporting the iterator protocol, yielding
# the same objects as the original one.
# self.assertEqual(type(itorg), type(it))
self.assertIsInstance(it, collections.abc.Iterator)
self.assertEqual(Dict(it), data)
it = pickle.loads(d)
drop = next(it)
d = pickle.dumps(it, proto)
it = pickle.loads(d)
del data[drop[0]]
self.assertEqual(Dict(it), data)
def test_reversevaluesiterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# UserDict fails these tests so our Dict should fail as well
with pytest.raises(TypeError):
data = Dict({1: "a", 2: "b", 3: "c"})
# data.values() isn't picklable, only its iterator
it = reversed(data.values())
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(list(it), list(reversed(data.values())))
it = pickle.loads(d)
drop = next(it)
d = pickle.dumps(it, proto)
it = pickle.loads(d)
values = list(it) + [drop]
self.assertEqual(sorted(values), sorted(data.values()))
def test_instance_dict_getattr_str_subclass(self):
class Foo:
def __init__(self, msg):
self.msg = msg
f = Foo("123")
class _str(str):
pass
self.assertEqual(f.msg, getattr(f, _str("msg")))
self.assertEqual(f.msg, f.__dict__[_str("msg")])
def test_object_set_item_single_instance_non_str_key(self):
class Foo:
pass
f = Foo()
f.__dict__[1] = 1
f.a = "a"
self.assertEqual(f.__dict__, {1: 1, "a": "a"})
def check_reentrant_insertion(self, mutate):
# This object will trigger mutation of the dict when replaced
# by another value. Note this relies on refcounting: the test
# won't achieve its purpose on fully-GCed Python implementations.
class Mutating:
def __del__(self):
mutate(d)
d = Dict({k: Mutating() for k in "abcdefghijklmnopqr"})
for k in list(d):
d[k] = k
def test_reentrant_insertion(self):
# Reentrant insertion shouldn't crash (see issue #22653)
def mutate(d):
d["b"] = 5
self.check_reentrant_insertion(mutate)
def mutate(d):
d.update(self.__dict__)
d.clear()
self.check_reentrant_insertion(mutate)
def mutate(d):
while d:
d.popitem()
self.check_reentrant_insertion(mutate)
@pytest.mark.slow
def test_merge_and_mutate(self):
# this fails because it expects a RuntimeError when the keys change, however
# the test_dictitems_contains_use_after_free expects StopIteration when the
# keys change?
class X:
def __hash__(self):
return 0
def __eq__(self, o):
other.clear()
return False
test_list = [(i, 0) for i in range(1, 1337)]
other = Dict(test_list)
other[X()] = 0
d = Dict({X(): 0, 1: 1})
self.assertRaises(RuntimeError, d.update, other)
@pytest.mark.xfail
@pytest.mark.slow
def test_free_after_iterating(self):
# this seems like a bit of a puzzle
support.check_free_after_iterating(self, iter, Dict)
support.check_free_after_iterating(self, lambda d: iter(d.keys()), Dict)
support.check_free_after_iterating(self, lambda d: iter(d.values()), Dict)
support.check_free_after_iterating(self, lambda d: iter(d.items()), Dict)
def test_fromkeys_operator_modifying_dict_operand(self):
# test fix for seg fault reported in issue 27945 part 4a.
class X(int):
def __hash__(self):
return 13
def __eq__(self, other):
if len(d) > 1:
d.clear()
return False
d = Dict() # this is required to exist so that d can be constructed!
d = Dict({X(1): 1, X(2): 2})
try:
dict.fromkeys(d) # shouldn't crash
except RuntimeError: # implementation defined
pass
def test_fromkeys_operator_modifying_set_operand(self):
# test fix for seg fault reported in issue 27945 part 4b.
class X(int):
def __hash__(self):
return 13
def __eq__(self, other):
if len(d) > 1:
d.clear()
return False
d = {} # this is required to exist so that d can be constructed!
d = {X(1), X(2)}
try:
Dict.fromkeys(d) # shouldn't crash
except RuntimeError: # implementation defined
pass
@pytest.mark.xfail
def test_dictitems_contains_use_after_free(self):
# this seems like a bit of a puzzle
# see iterator.py for more details
class X:
def __eq__(self, other):
d.clear()
return NotImplemented
d = Dict({0: String("test")}) # TODO: we should be able to support set
(0, X()) in d.items()
def test_init_use_after_free(self):
class X:
def __hash__(self):
pair[:] = []
return 13
pair = [X(), 123]
Dict([pair])
def test_oob_indexing_dictiter_iternextitem(self):
# TODO: investigate this
pass
# class X(int):
# def __del__(self):
# d.clear()
#
# d = Dict({i: X(i) for i in range(8)})
#
# def iter_and_mutate():
# for result in d.items():
# if result[0] == 2:
# d[2] = None # free d[2] --> X(2).__del__ was called
#
# self.assertRaises(RuntimeError, iter_and_mutate)
def test_reversed(self):
d = Dict({"a": 1, "b": 2, "foo": 0, "c": 3, "d": 4})
del d["foo"]
# UserDict does not support reversed so we do not either
with pytest.raises(TypeError):
r = reversed(d)
self.assertEqual(list(r), list("dcba"))
self.assertRaises(StopIteration, next, r)
def test_reverse_iterator_for_empty_dict(self):
        # bpo-38525: reversed iterator should work properly
# empty dict is directly used for reference count test
# UserDict does not support reversed so we do not either
with pytest.raises(TypeError):
self.assertEqual(list(reversed(Dict())), [])
self.assertEqual(list(reversed(Dict().items())), [])
self.assertEqual(list(reversed(Dict().values())), [])
self.assertEqual(list(reversed(Dict().keys())), [])
# dict() and {} don't trigger the same code path
self.assertEqual(list(reversed(dict())), [])
self.assertEqual(list(reversed(dict().items())), [])
self.assertEqual(list(reversed(dict().values())), [])
self.assertEqual(list(reversed(dict().keys())), [])
# def test_reverse_iterator_for_shared_shared_dicts(self):
# # UserDict doesnt support reversed and this causes infinite recursion
# # we will just disable this test
# class A:
# def __init__(self, x, y):
# if x:
# self.x = x
# if y:
# self.y = y
# self.assertEqual(list(reversed(A(1, 2).__dict__)), ["y", "x"])
# self.assertEqual(list(reversed(A(1, 0).__dict__)), ["x"])
# self.assertEqual(list(reversed(A(0, 1).__dict__)), ["y"])
def test_dict_copy_order(self):
# bpo-34320
od = collections.OrderedDict([("a", 1), ("b", 2)])
od.move_to_end("a")
expected = list(od.items())
copy = Dict(od)
self.assertEqual(list(copy.items()), expected)
# dict subclass doesn't override __iter__
class CustomDict(Dict):
pass
pairs = [("a", 1), ("b", 2), ("c", 3)]
d = CustomDict(pairs)
self.assertEqual(pairs, list(Dict(d).items()))
        # UserDict doesn't support reversed and this causes infinite recursion
# we will just disable this test
# class CustomReversedDict(dict):
# def keys(self):
# return reversed(list(dict.keys(self)))
# __iter__ = keys
# def items(self):
# return reversed(dict.items(self))
# d = CustomReversedDict(pairs)
# self.assertEqual(pairs[::-1], list(dict(d).items()))
| 32.078844
| 91
| 0.516017
|
ad9ad171195c61fce9d311a4ba2ac3865b799746
| 15,756
|
py
|
Python
|
segmentation/builders/preprocessor_builder.py
|
deltaautonomy/delta_perception
|
7b40f005b9753464016207d142969890ff6ec031
|
[
"BSD-3-Clause"
] | 1
|
2021-05-15T13:05:09.000Z
|
2021-05-15T13:05:09.000Z
|
segmentation/builders/preprocessor_builder.py
|
deltaautonomy/delta_perception
|
7b40f005b9753464016207d142969890ff6ec031
|
[
"BSD-3-Clause"
] | null | null | null |
segmentation/builders/preprocessor_builder.py
|
deltaautonomy/delta_perception
|
7b40f005b9753464016207d142969890ff6ec031
|
[
"BSD-3-Clause"
] | 3
|
2020-02-20T21:59:02.000Z
|
2021-05-15T13:05:10.000Z
|
r"""Preprocessing step for inptut images"""
import functools
import tensorflow as tf
from protos import preprocessor_pb2
from builders import dataset_builder
RESIZE_METHOD_MAP = {
preprocessor_pb2.BICUBIC: tf.image.ResizeMethod.BICUBIC,
preprocessor_pb2.BILINEAR: tf.image.ResizeMethod.BILINEAR,
preprocessor_pb2.NEAREST_NEIGHBOR: (
tf.image.ResizeMethod.NEAREST_NEIGHBOR),
}
_RANDOM_SCALE_STEP_KEY = 'RANDOM_SCALE_STEP'
_IMAGE_CROP_KEY = 'IMAGE_CROP_STEP'
_IMAGE_SCALE_KEY = 'IMAGE_SCALE_KEY'
_IMAGE_HORIZONTAL_FLIP_KEY = 'IMAGE_HORIZONTAL_FLIP_STEP'
_RANDOM_PREPROCESSOR_SEED = 7
def _get_or_create_preprocess_rand_vars(generator_func,
function_id,
preprocess_vars_cache):
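    """Return a cached random variable for `function_id`, generating and
    caching a fresh one via `generator_func` when it is not present, so that
    related preprocessing steps share the same random draw."""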
if preprocess_vars_cache is not None:
var = preprocess_vars_cache.get(function_id)
if var is None:
var = generator_func()
preprocess_vars_cache.update({ function_id: var })
else:
var = generator_func()
return var
def set_fixed_image_size(images,
labels,
height_to_set,
width_to_set,
images_channel_dim=3,
labels_channel_dim=1,
preprocess_vars_cache=None):
with tf.name_scope('DimensionInput', values=[images, labels]):
fixed_input_tensor_shape = (
height_to_set, width_to_set, images_channel_dim)
images.set_shape(fixed_input_tensor_shape)
fixed_label_tensor_shape = (
height_to_set, width_to_set, labels_channel_dim)
labels.set_shape(fixed_label_tensor_shape)
return images, labels
def pad_to_specific_size(images,
labels,
height_to_set,
width_to_set,
images_channel_dim=3,
labels_channel_dim=1,
preprocess_vars_cache=None):
with tf.name_scope('PadInput', values=[images, labels]):
fixed_input_tensor_shape = (
height_to_set, width_to_set, images_channel_dim)
padded_images = tf.image.pad_to_bounding_box(
images, 0, 0, height_to_set, width_to_set)
padded_images.set_shape(fixed_input_tensor_shape)
fixed_label_tensor_shape = (
height_to_set, width_to_set, labels_channel_dim)
padded_labels = None
if labels is not None:
padded_labels = tf.image.pad_to_bounding_box(
labels, 0, 0, height_to_set, width_to_set)
padded_labels.set_shape(fixed_label_tensor_shape)
return padded_images, padded_labels
def _compute_new_static_size(image, min_dimension, max_dimension):
"""Compute new static shape for resize_to_range method."""
image_shape = image.get_shape().as_list()
orig_height = image_shape[0]
orig_width = image_shape[1]
num_channels = image_shape[2]
orig_min_dim = min(orig_height, orig_width)
# Calculates the larger of the possible sizes
large_scale_factor = min_dimension / float(orig_min_dim)
# Scaling orig_(height|width) by large_scale_factor will make the smaller
# dimension equal to min_dimension, save for floating point rounding errors.
# For reasonably-sized images, taking the nearest integer will reliably
# eliminate this error.
large_height = int(round(orig_height * large_scale_factor))
large_width = int(round(orig_width * large_scale_factor))
large_size = [large_height, large_width]
if max_dimension:
# Calculates the smaller of the possible sizes, use that if the larger
# is too big.
orig_max_dim = max(orig_height, orig_width)
small_scale_factor = max_dimension / float(orig_max_dim)
# Scaling orig_(height|width) by small_scale_factor will make the larger
# dimension equal to max_dimension, save for floating point rounding
# errors. For reasonably-sized images, taking the nearest integer will
# reliably eliminate this error.
small_height = int(round(orig_height * small_scale_factor))
small_width = int(round(orig_width * small_scale_factor))
small_size = [small_height, small_width]
new_size = large_size
if max(large_size) > max_dimension:
new_size = small_size
else:
new_size = large_size
return tf.constant(new_size + [num_channels])
def resize_to_range(image,
label=None,
min_dimension=None,
max_dimension=None,
method=tf.image.ResizeMethod.BILINEAR,
align_corners=True,
pad_to_max_dimension=False):
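    """Resizes `image` (and `label`, if given) so its smaller side matches
    `min_dimension` while preserving the aspect ratio, capping the larger
    side at `max_dimension` when that bound would be exceeded."""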
with tf.name_scope('ResizeToRange', values=[image, min_dimension]):
if image.get_shape().is_fully_defined():
new_size = _compute_new_static_size(image,
min_dimension, max_dimension)
else:
new_size = _compute_new_dynamic_size(image,
min_dimension, max_dimension)
new_image = tf.image.resize_bilinear(image,
new_size[:-1], align_corners=True)
        new_label = tf.image.resize_nearest_neighbor(label,
new_size[:-1], align_corners=True)
return (new_image, new_label)
def random_scale(images,
labels,
min_scale_ratio=0.5,
max_scale_ratio=2.0,
pad_to_dims=None,
seed=_RANDOM_PREPROCESSOR_SEED,
preprocess_vars_cache=None):
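    """Scales images and labels by one random factor drawn from
    [min_scale_ratio, max_scale_ratio], optionally padding the result up to
    `pad_to_dims` so a subsequent crop of that size is always possible."""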
with tf.name_scope('RandomScale', values=[images, labels]):
image_height, image_width, _ = images.get_shape().as_list()
generator_func = functools.partial(
tf.random_uniform, [],
minval=min_scale_ratio, maxval=max_scale_ratio,
dtype=tf.float32, seed=seed)
size_coef = _get_or_create_preprocess_rand_vars(
generator_func, _IMAGE_SCALE_KEY,
preprocess_vars_cache)
image_newysize = tf.to_int32(
tf.multiply(tf.to_float(image_height), size_coef))
image_newxsize = tf.to_int32(
tf.multiply(tf.to_float(image_width), size_coef))
new_shape = (image_newysize, image_newxsize)
# Must be 4D tensor for resize ops
images = tf.expand_dims(images, 0)
labels = tf.expand_dims(labels, 0)
scaled_images = tf.image.resize_bilinear(
images, new_shape, align_corners=True)
scaled_labels = tf.image.resize_nearest_neighbor(
labels, new_shape, align_corners=True)
if pad_to_dims is not None:
crop_height, crop_width = pad_to_dims
target_height = (image_newysize +
tf.maximum(crop_height - image_newysize, 0))
target_width = (image_newxsize +
tf.maximum(crop_width - image_newxsize, 0))
scaled_images = tf.image.pad_to_bounding_box(
scaled_images, 0, 0, target_height, target_width)
scaled_labels = tf.image.pad_to_bounding_box(
scaled_labels, 0, 0, target_height, target_width)
output_images = tf.squeeze(scaled_images, [0])
output_labels = tf.squeeze(scaled_labels, [0])
return output_images, output_labels
def random_crop(images, labels,
crop_height, crop_width,
images_channel_dim=3,
labels_channel_dim=1,
preprocess_vars_cache=None):
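    """Crops the same randomly chosen (crop_height, crop_width) window out of
    both images and labels; the sampled offsets can be cached so repeated
    calls reuse them and the two tensors stay aligned."""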
def _apply_random_crop(inputs, offsets, crop_shape):
sliced_inputs = tf.slice(inputs, offsets, crop_shape)
out_inputs = tf.reshape(sliced_inputs, crop_shape)
return out_inputs
with tf.name_scope('RandomCropImage', values=[images, labels]):
images_shape = tf.shape(images)
images_height = images_shape[0]
images_width = images_shape[1]
max_offset_height = tf.reshape(images_height-crop_height+1, [])
max_offset_width = tf.reshape(images_width-crop_width+1, [])
generator_func_height = functools.partial(
tf.random_uniform,
shape=[], maxval=max_offset_height, dtype=tf.int32)
generator_func_width = functools.partial(
tf.random_uniform,
shape=[], maxval=max_offset_width, dtype=tf.int32)
offset_height = _get_or_create_preprocess_rand_vars(
generator_func_height,
_IMAGE_CROP_KEY+'_0',
preprocess_vars_cache)
offset_width = _get_or_create_preprocess_rand_vars(
generator_func_width,
_IMAGE_CROP_KEY+'_1',
preprocess_vars_cache)
offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))
crop_shape_images = tf.stack(
[crop_height, crop_width, images_channel_dim])
crop_shape_labels = tf.stack(
[crop_height, crop_width, labels_channel_dim])
cropped_images = _apply_random_crop(images, offsets, crop_shape_images)
cropped_labels = _apply_random_crop(labels, offsets, crop_shape_labels)
# Must set shape here or in the set shape preprocessor step
# when dealing with ICNet
if images_channel_dim and labels_channel_dim:
cropped_images.set_shape((crop_height, crop_width,
images_channel_dim))
cropped_labels.set_shape((crop_height, crop_width,
labels_channel_dim))
return cropped_images, cropped_labels
def random_horizontal_flip(images,
labels,
seed=_RANDOM_PREPROCESSOR_SEED,
preprocess_vars_cache=None):
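    """Flips images and labels left-right together with probability 0.5."""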
def _flip_image(item):
flipped_item = tf.image.flip_left_right(item)
return flipped_item
with tf.name_scope('RandomHorizontalFlip', values=[images, labels]):
generator_func = functools.partial(
tf.random_uniform, [], seed=seed)
do_a_flip_random = _get_or_create_preprocess_rand_vars(
generator_func, _IMAGE_HORIZONTAL_FLIP_KEY,
preprocess_vars_cache)
do_a_flip_random = tf.greater(do_a_flip_random, 0.5)
flipped_images = tf.cond(do_a_flip_random,
lambda: _flip_image(images), lambda: images)
flipped_labels = tf.cond(do_a_flip_random,
lambda: _flip_image(labels), lambda: labels)
return flipped_images, flipped_labels
def preprocess_runner(tensor_dict, func_list, skip_labels=False, preprocess_vars_cache=None):
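    """Applies every preprocessing function in `func_list` to the image (and,
    unless `skip_labels` is set, the label) tensors in `tensor_dict`, sharing
    a single random-variable cache so the steps see consistent random draws."""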
if dataset_builder._IMAGE_FIELD not in tensor_dict \
or dataset_builder._LABEL_FIELD not in tensor_dict:
raise ValueError('"tensor_dict" must have both image'
'and label fields')
for item_key in [dataset_builder._IMAGE_FIELD,
dataset_builder._LABEL_FIELD]:
items = tensor_dict[item_key]
if len(items.get_shape()) != 3:
            raise ValueError('Images or Labels in tensor_dict should be rank 3')
tensor_dict[item_key] = items
if preprocess_vars_cache is None:
preprocess_vars_cache = {}
images = tf.to_float(tensor_dict[dataset_builder._IMAGE_FIELD])
images_shape = tf.shape(images)
# For now, we skip labels preprocessing for eval only, since we
# do whole image evaluation
# TODO: Fix this so it doesn't break for training
labels = None
if not skip_labels:
labels = tf.to_float(tensor_dict[dataset_builder._LABEL_FIELD])
    # Apply preprocessor functions
for preprocessor_step_func in func_list:
images, labels = preprocessor_step_func(images=images, labels=labels,
preprocess_vars_cache=preprocess_vars_cache)
output_dict = {}
output_dict[dataset_builder._IMAGE_FIELD] = images
output_dict[dataset_builder._LABEL_FIELD] = labels
return output_dict
def build(preprocessor_config_list):
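    """Builds a preprocessing callable from a list of preprocessor protos by
    mapping each configured step to a functools.partial of the matching
    function above and wrapping them all in `preprocess_runner`."""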
proprocessor_func_list = []
for preprocessor_step_config in preprocessor_config_list:
step_type = preprocessor_step_config.WhichOneof('preprocessing_step')
# Fixed image width and height for PSP module
if step_type == 'set_fixed_image_size':
config = preprocessor_step_config.set_fixed_image_size
dimension_image_fn = functools.partial(
set_fixed_image_size,
height_to_set=config.fixed_height,
                width_to_set=config.fixed_width,
images_channel_dim=config.images_channel_dim,
labels_channel_dim=config.labels_channel_dim)
proprocessor_func_list.append(dimension_image_fn)
# Resize the image and keep the aspect_ratio
if step_type == 'aspect_ratio_image_resize':
config = preprocessor_step_config.aspect_ratio_image_resize
if not (config.min_dimension <= config.max_dimension):
raise ValueError('min_dimension > max_dimension')
method = RESIZE_METHOD_MAP[config.resize_method]
image_resizer_fn = functools.partial(
resize_to_range,
min_dimension=config.min_dimension,
max_dimension=config.max_dimension,
pad_to_max_dimension=config.pad_to_max_dimension)
proprocessor_func_list.append(image_resizer_fn)
# Randomly Scale the image
if step_type == 'random_image_scale':
config = preprocessor_step_config.random_image_scale
if not (config.max_scale_ratio >= config.min_scale_ratio):
raise ValueError('min_scale_ratio > max_scale_ratio')
pad_to_dims = None
for cfg in preprocessor_config_list:
step_t = cfg.WhichOneof('preprocessing_step')
if step_t == 'random_image_crop':
dim = cfg.random_image_crop
pad_to_dims = (dim.crop_height, dim.crop_width)
image_scale_fn = functools.partial(
random_scale,
pad_to_dims=pad_to_dims,
min_scale_ratio=config.min_scale_ratio,
max_scale_ratio=config.max_scale_ratio)
proprocessor_func_list.append(image_scale_fn)
# Randomly crop the image
if step_type == 'random_image_crop':
config = preprocessor_step_config.random_image_crop
image_crop_fn = functools.partial(
random_crop,
crop_height=config.crop_height,
crop_width=config.crop_width,
images_channel_dim=config.images_channel_dim,
labels_channel_dim=config.labels_channel_dim)
proprocessor_func_list.append(image_crop_fn)
# Random Flips and Rotations
if step_type == 'random_horizontal_flip':
config = preprocessor_step_config.random_horizontal_flip
image_horizontal_flip_fn = functools.partial(
random_horizontal_flip)
proprocessor_func_list.append(image_horizontal_flip_fn)
if len(proprocessor_func_list) <= 0 and \
len(preprocessor_config_list) > 0:
raise ValueError('Unknown preprocessing step.')
preprocessor = functools.partial(
preprocess_runner,
func_list=proprocessor_func_list)
return preprocessor
| 42.128342
| 93
| 0.644326
|
412321471beba6aebbfbaa4089d47449594401ee
| 10,421
|
py
|
Python
|
preprocessor/vctk.py
|
mbarnig/Comprehensive-Tacotron2
|
f7604058af5f0112106fa2cf489caef94dd1b6f5
|
[
"MIT"
] | 25
|
2021-07-24T06:31:15.000Z
|
2022-03-29T03:20:47.000Z
|
preprocessor/vctk.py
|
mbarnig/Comprehensive-Tacotron2
|
f7604058af5f0112106fa2cf489caef94dd1b6f5
|
[
"MIT"
] | 5
|
2021-07-25T08:07:02.000Z
|
2022-03-19T16:47:45.000Z
|
preprocessor/vctk.py
|
mbarnig/Comprehensive-Tacotron2
|
f7604058af5f0112106fa2cf489caef94dd1b6f5
|
[
"MIT"
] | 8
|
2021-07-24T07:18:11.000Z
|
2021-12-14T12:56:13.000Z
|
import os
import random
import json
import re
import tgt
import librosa
import numpy as np
from tqdm import tqdm
from pathlib import Path
import audio as Audio
from text import text_to_sequence
from model import PreDefinedEmbedder
from utils.tools import save_mel_and_audio, plot_embedding
random.seed(1234)
class Preprocessor:
def __init__(self, config):
self.dataset = config["dataset"]
self.in_dir = config["path"]["corpus_path"]
self.wav_tag = config["path"]["wav_tag"]
self.wav_dir = config["path"]["wav_dir"]
self.txt_dir = config["path"]["txt_dir"]
self.out_dir = config["path"]["preprocessed_path"]
self.val_size = config["preprocessing"]["val_size"]
self.sampling_rate = config["preprocessing"]["audio"]["sampling_rate"]
self.skip_len = config["preprocessing"]["audio"]["skip_len"]
self.trim_top_db = config["preprocessing"]["audio"]["trim_top_db"]
self.filter_length = config["preprocessing"]["stft"]["filter_length"]
self.hop_length = config["preprocessing"]["stft"]["hop_length"]
self.silence_audio_size = config["preprocessing"]["audio"]["silence_audio_size"]
self.pre_emphasis = config["preprocessing"]["audio"]["pre_emphasis"]
self.max_wav_value = config["preprocessing"]["audio"]["max_wav_value"]
self.sanity_check = config["preprocessing"]["sanity_check"]
self.cleaners = config["preprocessing"]["text"]["text_cleaners"]
self.STFT = Audio.stft.TacotronSTFT(
config["preprocessing"]["stft"]["filter_length"],
config["preprocessing"]["stft"]["hop_length"],
config["preprocessing"]["stft"]["win_length"],
config["preprocessing"]["mel"]["n_mel_channels"],
config["preprocessing"]["audio"]["sampling_rate"],
config["preprocessing"]["mel"]["mel_fmin"],
config["preprocessing"]["mel"]["mel_fmax"],
)
self.speaker_emb = None
if config["preprocessing"]["speaker_embedder"] != "none":
self.speaker_emb = PreDefinedEmbedder(config)
def _init_spker_embeds(self, spkers):
spker_embeds = dict()
for spker in spkers:
spker_embeds[spker] = list()
return spker_embeds
def build_from_path(self):
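        """Walks the corpus speaker by speaker, caching text sequences and mel
        spectrograms under `out_dir`, averaging per-speaker embeddings, and
        returning the train/val metadata lists it also writes to disk."""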
txt_dir = os.path.join(self.in_dir, self.txt_dir)
wav_dir = os.path.join(self.in_dir, self.wav_dir)
embedding_dir = os.path.join(self.out_dir, "spker_embed")
os.makedirs((os.path.join(self.out_dir, "text")), exist_ok=True)
os.makedirs((os.path.join(self.out_dir, "mel")), exist_ok=True)
os.makedirs(embedding_dir, exist_ok=True)
print("Processing Data ...")
# out = list()
train = list()
val = list()
n_frames = 0
mel_min = float('inf')
mel_max = -float('inf')
max_seq_len = -float('inf')
sub_dirs = os.listdir(txt_dir)
if self.speaker_emb is not None:
spker_embeds = self._init_spker_embeds(sub_dirs)
skip_speakers = set()
preprocessed_dirs = os.listdir(embedding_dir)
for preprocessed_dir in preprocessed_dirs:
skip_speakers.add(preprocessed_dir.split("-")[0])
speakers = {}
for spker_id, speaker in enumerate(tqdm(sub_dirs)):
speakers[speaker] = spker_id
for i, txt_name in enumerate(tqdm(os.listdir(os.path.join(txt_dir, speaker)))):
basename = txt_name.split(".")[0]
with open(os.path.join(txt_dir, speaker, txt_name), "r") as f:
text = f.readline().strip("\n")
wav_path = os.path.join(os.path.join(wav_dir, speaker), "{}_{}.flac".format(basename, self.wav_tag))
if not os.path.isfile(wav_path):
print("[Error] No flac file:{}".format(wav_path))
continue
ret = self.process_utterance(text, wav_path, speaker, basename, skip_speakers)
if ret is None:
continue
else:
info, n, m_min, m_max, spker_embed = ret
# out.append(info)
if i == 0 or i == 1:
val.append(info)
else:
train.append(info)
if self.speaker_emb is not None:
spker_embeds[speaker].append(spker_embed)
if mel_min > m_min:
mel_min = m_min
if mel_max < m_max:
mel_max = m_max
if n > max_seq_len:
max_seq_len = n
n_frames += n
# Calculate and save mean speaker embedding of this speaker
if self.speaker_emb is not None and speaker not in skip_speakers:
spker_embed_filename = '{}-spker_embed.npy'.format(speaker)
np.save(os.path.join(self.out_dir, 'spker_embed', spker_embed_filename), \
np.mean(spker_embeds[speaker], axis=0), allow_pickle=False)
# Save files
with open(os.path.join(self.out_dir, "speakers.json"), "w") as f:
f.write(json.dumps(speakers))
with open(os.path.join(self.out_dir, "stats.json"), "w") as f:
stats = {
"mel": [
float(mel_min),
float(mel_max),
],
"max_seq_len": max_seq_len
}
f.write(json.dumps(stats))
print(
"Total time: {} hours".format(
n_frames * self.hop_length / self.sampling_rate / 3600
)
)
print("plot speaker embedding...")
plot_embedding(
self.out_dir, *self.load_embedding(embedding_dir),
self.divide_speaker_by_gender(self.in_dir), filename="spker_embed_tsne.png"
)
# random.shuffle(out)
# out = [r for r in out if r is not None]
train = [r for r in train if r is not None]
val = [r for r in val if r is not None]
# Write metadata
with open(os.path.join(self.out_dir, "train.txt"), "w", encoding="utf-8") as f:
# for m in out[self.val_size :]:
for m in train:
f.write(m + "\n")
with open(os.path.join(self.out_dir, "val.txt"), "w", encoding="utf-8") as f:
# for m in out[: self.val_size]:
for m in val:
f.write(m + "\n")
return train, val #out
def load_audio(self, wav_path):
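        """Loads a wav and returns None if it is shorter than `skip_len`;
        otherwise normalizes, trims silence, optionally applies pre-emphasis,
        pads trailing silence, and returns (raw_wav, processed_wav)."""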
wav_raw, _ = librosa.load(wav_path, self.sampling_rate)
if len(wav_raw) < self.skip_len:
return None
wav = wav_raw / np.abs(wav_raw).max() * 0.999
wav = librosa.effects.trim(wav, top_db=self.trim_top_db, frame_length=self.filter_length, hop_length=self.hop_length)[0]
if self.pre_emphasis:
wav = np.append(wav[0], wav[1:] - 0.97 * wav[:-1])
wav = wav / np.abs(wav).max() * 0.999
wav = np.append(wav, [0.] * self.hop_length * self.silence_audio_size)
wav = wav.astype(np.float32)
return wav_raw, wav
def process_utterance(self, raw_text, wav_path, speaker, basename, skip_speakers):
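        """Computes (or reloads cached) text and mel features for a single
        utterance, optionally extracts a speaker embedding, and returns the
        pipe-joined metadata line, mel frame count, mel min/max, and the
        embedding used by `build_from_path`."""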
text_filename = "{}-text-{}.npy".format(speaker, basename)
mel_filename = "{}-mel-{}.npy".format(speaker, basename)
# Preprocess text
if not os.path.isfile(os.path.join(self.out_dir, "text", text_filename)):
text = np.array(text_to_sequence(raw_text, self.cleaners))
np.save(
os.path.join(self.out_dir, "text", text_filename),
text,
)
else:
text = np.load(os.path.join(self.out_dir, "text", text_filename))
# Load and process wav files
wav_raw = wav = None
if not os.path.isfile(os.path.join(self.out_dir, "mel", mel_filename)):
            loaded = self.load_audio(wav_path)
            if loaded is None:
                return None
            wav_raw, wav = loaded
# Compute mel-scale spectrogram
mel_spectrogram = Audio.tools.get_mel_from_wav(wav, self.STFT)
# Sanity check
if self.sanity_check:
save_mel_and_audio(mel_spectrogram, wav*self.max_wav_value,
self.sampling_rate, self.out_dir, basename, tag="processed"
)
save_mel_and_audio(Audio.tools.get_mel_from_wav(wav_raw, self.STFT), wav_raw*self.max_wav_value,
self.sampling_rate, self.out_dir, basename, tag="raw"
)
exit(0) # quit for testing
np.save(
os.path.join(self.out_dir, "mel", mel_filename),
mel_spectrogram.T,
)
else:
mel_spectrogram = np.load(os.path.join(self.out_dir, "mel", mel_filename)).T
# Speaker embedding
spker_embed=None
if self.speaker_emb is not None and speaker not in skip_speakers:
if wav is not None:
spker_embed = self.speaker_emb(wav)
else:
                loaded = self.load_audio(wav_path)
                if loaded is None:
                    return None
                wav_raw, wav = loaded
spker_embed = self.speaker_emb(wav)
return (
"|".join([basename, speaker, raw_text]),
mel_spectrogram.shape[1],
np.min(mel_spectrogram),
np.max(mel_spectrogram),
spker_embed,
)
def divide_speaker_by_gender(self, in_dir, speaker_path="speaker-info.txt"):
speakers = dict()
with open(os.path.join(in_dir, speaker_path), encoding='utf-8') as f:
for line in tqdm(f):
if "ID" in line: continue
parts = [p.strip() for p in re.sub(' +', ' ',(line.strip())).split(' ')]
spk_id, gender = parts[0], parts[2]
speakers[str(spk_id)] = gender
return speakers
def load_embedding(self, embedding_dir):
embedding_path_list = [_ for _ in Path(embedding_dir).rglob('*.npy')]
embedding = None
embedding_speaker_id = list()
# Gather data
for path in tqdm(embedding_path_list):
embedding = np.concatenate((embedding, np.load(path)), axis=0) \
if embedding is not None else np.load(path)
embedding_speaker_id.append(str(str(path).split('/')[-1].split('-')[0]))
return embedding, embedding_speaker_id
| 40.080769
| 128
| 0.571058
|
2604a019cd2415bfa579202eb1e4e992e8b6a276
| 7,718
|
py
|
Python
|
sdk/python/pulumi_aws/elasticache/outputs.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/elasticache/outputs.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/elasticache/outputs.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'ClusterCacheNode',
'ParameterGroupParameter',
'ReplicationGroupClusterMode',
'GetClusterCacheNodeResult',
]
@pulumi.output_type
class ClusterCacheNode(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "availabilityZone":
suggest = "availability_zone"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ClusterCacheNode. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ClusterCacheNode.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ClusterCacheNode.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
address: Optional[str] = None,
availability_zone: Optional[str] = None,
id: Optional[str] = None,
port: Optional[int] = None):
"""
:param str availability_zone: Availability Zone for the cache cluster. If you want to create cache nodes in multi-az, use `preferred_availability_zones` instead. Default: System chosen Availability Zone. Changing this value will re-create the resource.
:param int port: The port number on which each of the cache nodes will accept connections. For Memcached the default is 11211, and for Redis the default port is 6379. Cannot be provided with `replication_group_id`. Changing this value will re-create the resource.
"""
if address is not None:
pulumi.set(__self__, "address", address)
if availability_zone is not None:
pulumi.set(__self__, "availability_zone", availability_zone)
if id is not None:
pulumi.set(__self__, "id", id)
if port is not None:
pulumi.set(__self__, "port", port)
@property
@pulumi.getter
def address(self) -> Optional[str]:
return pulumi.get(self, "address")
@property
@pulumi.getter(name="availabilityZone")
def availability_zone(self) -> Optional[str]:
"""
Availability Zone for the cache cluster. If you want to create cache nodes in multi-az, use `preferred_availability_zones` instead. Default: System chosen Availability Zone. Changing this value will re-create the resource.
"""
return pulumi.get(self, "availability_zone")
@property
@pulumi.getter
def id(self) -> Optional[str]:
return pulumi.get(self, "id")
@property
@pulumi.getter
def port(self) -> Optional[int]:
"""
The port number on which each of the cache nodes will accept connections. For Memcached the default is 11211, and for Redis the default port is 6379. Cannot be provided with `replication_group_id`. Changing this value will re-create the resource.
"""
return pulumi.get(self, "port")
@pulumi.output_type
class ParameterGroupParameter(dict):
def __init__(__self__, *,
name: str,
value: str):
"""
:param str name: The name of the ElastiCache parameter.
:param str value: The value of the ElastiCache parameter.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the ElastiCache parameter.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def value(self) -> str:
"""
The value of the ElastiCache parameter.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class ReplicationGroupClusterMode(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "numNodeGroups":
suggest = "num_node_groups"
elif key == "replicasPerNodeGroup":
suggest = "replicas_per_node_group"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ReplicationGroupClusterMode. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ReplicationGroupClusterMode.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ReplicationGroupClusterMode.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
num_node_groups: Optional[int] = None,
replicas_per_node_group: Optional[int] = None):
"""
:param int num_node_groups: Number of node groups (shards) for this Redis replication group. Changing this number will trigger an online resizing operation before other settings modifications. Required unless `global_replication_group_id` is set.
:param int replicas_per_node_group: Number of replica nodes in each node group. Valid values are 0 to 5. Changing this number will trigger an online resizing operation before other settings modifications.
"""
if num_node_groups is not None:
pulumi.set(__self__, "num_node_groups", num_node_groups)
if replicas_per_node_group is not None:
pulumi.set(__self__, "replicas_per_node_group", replicas_per_node_group)
@property
@pulumi.getter(name="numNodeGroups")
def num_node_groups(self) -> Optional[int]:
"""
Number of node groups (shards) for this Redis replication group. Changing this number will trigger an online resizing operation before other settings modifications. Required unless `global_replication_group_id` is set.
"""
return pulumi.get(self, "num_node_groups")
@property
@pulumi.getter(name="replicasPerNodeGroup")
def replicas_per_node_group(self) -> Optional[int]:
"""
Number of replica nodes in each node group. Valid values are 0 to 5. Changing this number will trigger an online resizing operation before other settings modifications.
"""
return pulumi.get(self, "replicas_per_node_group")
@pulumi.output_type
class GetClusterCacheNodeResult(dict):
def __init__(__self__, *,
address: str,
availability_zone: str,
id: str,
port: int):
"""
:param str availability_zone: The Availability Zone for the cache cluster.
:param int port: The port number on which each of the cache nodes will
accept connections.
"""
pulumi.set(__self__, "address", address)
pulumi.set(__self__, "availability_zone", availability_zone)
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "port", port)
@property
@pulumi.getter
def address(self) -> str:
return pulumi.get(self, "address")
@property
@pulumi.getter(name="availabilityZone")
def availability_zone(self) -> str:
"""
The Availability Zone for the cache cluster.
"""
return pulumi.get(self, "availability_zone")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter
def port(self) -> int:
"""
The port number on which each of the cache nodes will
accept connections.
"""
return pulumi.get(self, "port")
| 37.466019
| 271
| 0.64965
|
7500a33b1f097b45715714c5050361c34ab4a675
| 8,262
|
py
|
Python
|
pandas/io/excel/_xlsxwriter.py
|
KneeShard/pandas
|
ce3bac9af43838c7d690ee86e9bec4976a3303e3
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2
|
2021-08-06T14:27:43.000Z
|
2021-08-06T14:27:56.000Z
|
pandas/io/excel/_xlsxwriter.py
|
ra1nty/pandas
|
0b68d87a4438a13f14a2ed5af2e432df02eb0b2c
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/io/excel/_xlsxwriter.py
|
ra1nty/pandas
|
0b68d87a4438a13f14a2ed5af2e432df02eb0b2c
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2021-06-22T14:36:40.000Z
|
2021-06-22T14:36:40.000Z
|
from __future__ import annotations
from typing import Any
import pandas._libs.json as json
from pandas._typing import StorageOptions
from pandas.io.excel._base import ExcelWriter
from pandas.io.excel._util import validate_freeze_panes
class _XlsxStyler:
# Map from openpyxl-oriented styles to flatter xlsxwriter representation
# Ordering necessary for both determinism and because some are keyed by
# prefixes of others.
STYLE_MAPPING: dict[str, list[tuple[tuple[str, ...], str]]] = {
"font": [
(("name",), "font_name"),
(("sz",), "font_size"),
(("size",), "font_size"),
(("color", "rgb"), "font_color"),
(("color",), "font_color"),
(("b",), "bold"),
(("bold",), "bold"),
(("i",), "italic"),
(("italic",), "italic"),
(("u",), "underline"),
(("underline",), "underline"),
(("strike",), "font_strikeout"),
(("vertAlign",), "font_script"),
(("vertalign",), "font_script"),
],
"number_format": [(("format_code",), "num_format"), ((), "num_format")],
"protection": [(("locked",), "locked"), (("hidden",), "hidden")],
"alignment": [
(("horizontal",), "align"),
(("vertical",), "valign"),
(("text_rotation",), "rotation"),
(("wrap_text",), "text_wrap"),
(("indent",), "indent"),
(("shrink_to_fit",), "shrink"),
],
"fill": [
(("patternType",), "pattern"),
(("patterntype",), "pattern"),
(("fill_type",), "pattern"),
(("start_color", "rgb"), "fg_color"),
(("fgColor", "rgb"), "fg_color"),
(("fgcolor", "rgb"), "fg_color"),
(("start_color",), "fg_color"),
(("fgColor",), "fg_color"),
(("fgcolor",), "fg_color"),
(("end_color", "rgb"), "bg_color"),
(("bgColor", "rgb"), "bg_color"),
(("bgcolor", "rgb"), "bg_color"),
(("end_color",), "bg_color"),
(("bgColor",), "bg_color"),
(("bgcolor",), "bg_color"),
],
"border": [
(("color", "rgb"), "border_color"),
(("color",), "border_color"),
(("style",), "border"),
(("top", "color", "rgb"), "top_color"),
(("top", "color"), "top_color"),
(("top", "style"), "top"),
(("top",), "top"),
(("right", "color", "rgb"), "right_color"),
(("right", "color"), "right_color"),
(("right", "style"), "right"),
(("right",), "right"),
(("bottom", "color", "rgb"), "bottom_color"),
(("bottom", "color"), "bottom_color"),
(("bottom", "style"), "bottom"),
(("bottom",), "bottom"),
(("left", "color", "rgb"), "left_color"),
(("left", "color"), "left_color"),
(("left", "style"), "left"),
(("left",), "left"),
],
}
@classmethod
def convert(cls, style_dict, num_format_str=None):
"""
converts a style_dict to an xlsxwriter format dict
Parameters
----------
style_dict : style dictionary to convert
num_format_str : optional number format string
"""
# Create a XlsxWriter format object.
props = {}
if num_format_str is not None:
props["num_format"] = num_format_str
if style_dict is None:
return props
if "borders" in style_dict:
style_dict = style_dict.copy()
style_dict["border"] = style_dict.pop("borders")
for style_group_key, style_group in style_dict.items():
for src, dst in cls.STYLE_MAPPING.get(style_group_key, []):
# src is a sequence of keys into a nested dict
# dst is a flat key
if dst in props:
continue
v = style_group
for k in src:
try:
v = v[k]
except (KeyError, TypeError):
break
else:
props[dst] = v
if isinstance(props.get("pattern"), str):
# TODO: support other fill patterns
props["pattern"] = 0 if props["pattern"] == "none" else 1
for k in ["border", "top", "right", "bottom", "left"]:
if isinstance(props.get(k), str):
try:
props[k] = [
"none",
"thin",
"medium",
"dashed",
"dotted",
"thick",
"double",
"hair",
"mediumDashed",
"dashDot",
"mediumDashDot",
"dashDotDot",
"mediumDashDotDot",
"slantDashDot",
].index(props[k])
except ValueError:
props[k] = 2
if isinstance(props.get("font_script"), str):
props["font_script"] = ["baseline", "superscript", "subscript"].index(
props["font_script"]
)
if isinstance(props.get("underline"), str):
props["underline"] = {
"none": 0,
"single": 1,
"double": 2,
"singleAccounting": 33,
"doubleAccounting": 34,
}[props["underline"]]
return props
class XlsxWriter(ExcelWriter):
engine = "xlsxwriter"
supported_extensions = (".xlsx",)
def __init__(
self,
path,
engine=None,
date_format=None,
datetime_format=None,
mode: str = "w",
storage_options: StorageOptions = None,
if_sheet_exists: str | None = None,
engine_kwargs: dict[str, Any] | None = None,
):
# Use the xlsxwriter module as the Excel writer.
from xlsxwriter import Workbook
engine_kwargs = engine_kwargs or {}
if mode == "a":
raise ValueError("Append mode is not supported with xlsxwriter!")
super().__init__(
path,
engine=engine,
date_format=date_format,
datetime_format=datetime_format,
mode=mode,
storage_options=storage_options,
if_sheet_exists=if_sheet_exists,
engine_kwargs=engine_kwargs,
)
self.book = Workbook(self.handles.handle, **engine_kwargs)
def save(self):
"""
Save workbook to disk.
"""
return self.book.close()
def write_cells(
self, cells, sheet_name=None, startrow=0, startcol=0, freeze_panes=None
):
# Write the frame cells using xlsxwriter.
sheet_name = self._get_sheet_name(sheet_name)
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.add_worksheet(sheet_name)
self.sheets[sheet_name] = wks
style_dict = {"null": None}
if validate_freeze_panes(freeze_panes):
wks.freeze_panes(*(freeze_panes))
for cell in cells:
val, fmt = self._value_with_fmt(cell.val)
stylekey = json.dumps(cell.style)
if fmt:
stylekey += fmt
if stylekey in style_dict:
style = style_dict[stylekey]
else:
style = self.book.add_format(_XlsxStyler.convert(cell.style, fmt))
style_dict[stylekey] = style
if cell.mergestart is not None and cell.mergeend is not None:
wks.merge_range(
startrow + cell.row,
startcol + cell.col,
startrow + cell.mergestart,
startcol + cell.mergeend,
val,
style,
)
else:
wks.write(startrow + cell.row, startcol + cell.col, val, style)
| 33.449393
| 82
| 0.469257
|
68ecec98fa4cfb37d6997d4f3527ccc57b100d80
| 6,076
|
py
|
Python
|
eggs/mercurial-2.2.3-py2.7-linux-x86_64-ucs4.egg/hgext/relink.py
|
bopopescu/phyG
|
023f505b705ab953f502cbc55e90612047867583
|
[
"CC-BY-3.0"
] | null | null | null |
eggs/mercurial-2.2.3-py2.7-linux-x86_64-ucs4.egg/hgext/relink.py
|
bopopescu/phyG
|
023f505b705ab953f502cbc55e90612047867583
|
[
"CC-BY-3.0"
] | null | null | null |
eggs/mercurial-2.2.3-py2.7-linux-x86_64-ucs4.egg/hgext/relink.py
|
bopopescu/phyG
|
023f505b705ab953f502cbc55e90612047867583
|
[
"CC-BY-3.0"
] | 1
|
2021-12-16T23:31:37.000Z
|
2021-12-16T23:31:37.000Z
|
# Mercurial extension to provide 'hg relink' command
#
# Copyright (C) 2007 Brendan Cully <brendan@kublai.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""recreates hardlinks between repository clones"""
from mercurial import hg, util
from mercurial.i18n import _
import os, stat
def relink(ui, repo, origin=None, **opts):
"""recreate hardlinks between two repositories
When repositories are cloned locally, their data files will be
hardlinked so that they only use the space of a single repository.
Unfortunately, subsequent pulls into either repository will break
hardlinks for any files touched by the new changesets, even if
both repositories end up pulling the same changes.
Similarly, passing --rev to "hg clone" will fail to use any
hardlinks, falling back to a complete copy of the source
repository.
This command lets you recreate those hardlinks and reclaim that
wasted space.
This repository will be relinked to share space with ORIGIN, which
must be on the same local disk. If ORIGIN is omitted, looks for
"default-relink", then "default", in [paths].
Do not attempt any read operations on this repository while the
command is running. (Both repositories will be locked against
writes.)
"""
if (not util.safehasattr(util, 'samefile') or
not util.safehasattr(util, 'samedevice')):
raise util.Abort(_('hardlinks are not supported on this system'))
src = hg.repository(ui, ui.expandpath(origin or 'default-relink',
origin or 'default'))
if not src.local():
raise util.Abort(_('must specify local origin repository'))
ui.status(_('relinking %s to %s\n') % (src.store.path, repo.store.path))
if repo.root == src.root:
ui.status(_('there is nothing to relink\n'))
return
locallock = repo.lock()
try:
remotelock = src.lock()
try:
candidates = sorted(collect(src, ui))
targets = prune(candidates, src.store.path, repo.store.path, ui)
do_relink(src.store.path, repo.store.path, targets, ui)
finally:
remotelock.release()
finally:
locallock.release()
def collect(src, ui):
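    """Walk the source store and return (relative path, stat) pairs for the
    .d/.i revlog files that are candidates for relinking."""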
seplen = len(os.path.sep)
candidates = []
live = len(src['tip'].manifest())
# Your average repository has some files which were deleted before
# the tip revision. We account for that by assuming that there are
# 3 tracked files for every 2 live files as of the tip version of
# the repository.
#
# mozilla-central as of 2010-06-10 had a ratio of just over 7:5.
total = live * 3 // 2
src = src.store.path
pos = 0
ui.status(_("tip has %d files, estimated total number of files: %s\n")
% (live, total))
for dirpath, dirnames, filenames in os.walk(src):
dirnames.sort()
relpath = dirpath[len(src) + seplen:]
for filename in sorted(filenames):
if not filename[-2:] in ('.d', '.i'):
continue
st = os.stat(os.path.join(dirpath, filename))
if not stat.S_ISREG(st.st_mode):
continue
pos += 1
candidates.append((os.path.join(relpath, filename), st))
ui.progress(_('collecting'), pos, filename, _('files'), total)
ui.progress(_('collecting'), None)
ui.status(_('collected %d candidate storage files\n') % len(candidates))
return candidates
def prune(candidates, src, dst, ui):
def linkfilter(src, dst, st):
try:
ts = os.stat(dst)
except OSError:
# Destination doesn't have this file?
return False
if util.samefile(src, dst):
return False
if not util.samedevice(src, dst):
# No point in continuing
raise util.Abort(
_('source and destination are on different devices'))
if st.st_size != ts.st_size:
return False
return st
targets = []
total = len(candidates)
pos = 0
for fn, st in candidates:
pos += 1
srcpath = os.path.join(src, fn)
tgt = os.path.join(dst, fn)
ts = linkfilter(srcpath, tgt, st)
if not ts:
ui.debug('not linkable: %s\n' % fn)
continue
targets.append((fn, ts.st_size))
ui.progress(_('pruning'), pos, fn, _('files'), total)
ui.progress(_('pruning'), None)
ui.status(_('pruned down to %d probably relinkable files\n') % len(targets))
return targets
def do_relink(src, dst, files, ui):
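    """Compare each candidate byte-for-byte and replace identical destination
    files with hardlinks to the source, reporting the space reclaimed."""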
def relinkfile(src, dst):
bak = dst + '.bak'
os.rename(dst, bak)
try:
util.oslink(src, dst)
except OSError:
os.rename(bak, dst)
raise
os.remove(bak)
CHUNKLEN = 65536
relinked = 0
savedbytes = 0
pos = 0
total = len(files)
for f, sz in files:
pos += 1
source = os.path.join(src, f)
tgt = os.path.join(dst, f)
# Binary mode, so that read() works correctly, especially on Windows
sfp = file(source, 'rb')
dfp = file(tgt, 'rb')
sin = sfp.read(CHUNKLEN)
while sin:
din = dfp.read(CHUNKLEN)
if sin != din:
break
sin = sfp.read(CHUNKLEN)
sfp.close()
dfp.close()
if sin:
ui.debug('not linkable: %s\n' % f)
continue
try:
relinkfile(source, tgt)
ui.progress(_('relinking'), pos, f, _('files'), total)
relinked += 1
savedbytes += sz
except OSError, inst:
ui.warn('%s: %s\n' % (tgt, str(inst)))
ui.progress(_('relinking'), None)
ui.status(_('relinked %d files (%s reclaimed)\n') %
(relinked, util.bytecount(savedbytes)))
cmdtable = {
'relink': (
relink,
[],
_('[ORIGIN]')
)
}
| 32.843243
| 80
| 0.589862
|
60b6ed4146c56c34e2877f45f081aa182cac66d0
| 301
|
py
|
Python
|
data/multilingual/Latn.ENG/Sans_8/pdf_to_json_test_Latn.ENG_Sans_8.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | 1
|
2021-09-19T19:47:35.000Z
|
2021-09-19T19:47:35.000Z
|
data/multilingual/Latn.ENG/Sans_8/pdf_to_json_test_Latn.ENG_Sans_8.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
data/multilingual/Latn.ENG/Sans_8/pdf_to_json_test_Latn.ENG_Sans_8.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.ENG/Sans_8/udhr_Latn.ENG_Sans_8.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.1
| 71
| 0.810631
|
f7d3765f3e2ed7efc7d6d9a598112c01dd0c633e
| 7,630
|
py
|
Python
|
openai/baselines/baselines/deepq/experiments_real/experiments_real_300_normal/train_real_cloud.py
|
habichta/ETHZDeepReinforcementLearning
|
e1ae22159753724290f20068214bb3d94fcb7be4
|
[
"BSD-3-Clause"
] | 7
|
2018-01-23T05:17:50.000Z
|
2020-10-30T02:29:59.000Z
|
openai/baselines/baselines/deepq/experiments_17_balanced_alpha09beta_reward_large_shorter/experiments_real_300_normal/train_real_cloud.py
|
habichta/ETHZDeepReinforcementLearning
|
e1ae22159753724290f20068214bb3d94fcb7be4
|
[
"BSD-3-Clause"
] | null | null | null |
openai/baselines/baselines/deepq/experiments_17_balanced_alpha09beta_reward_large_shorter/experiments_real_300_normal/train_real_cloud.py
|
habichta/ETHZDeepReinforcementLearning
|
e1ae22159753724290f20068214bb3d94fcb7be4
|
[
"BSD-3-Clause"
] | 2
|
2018-01-23T05:17:58.000Z
|
2018-07-02T00:13:34.000Z
|
import gym
from baselines import deepq
from baselines.common.atari_wrappers_deprecated import wrap_dqn, ScaledFloatFrame
from cloud_environment_real import RealCloudEnvironment
import numpy as np
import collections
import os
import csv
import random
# ONLY ONE EPISODE
# Normalize images? Brightness change Jan => HSV
# more complicated toy environment, transparency
# hard data set once it works on one
#Logging
def logger_callback(locals,globals):
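    """Callback passed to deepq.learn: accumulates per-episode action counts,
    Q-value, TD-error and gradient-ratio statistics from `locals`, dumps them
    through the logger at episode end, and appends a CSV row to the training
    log every 100 episodes. Returns False so training never stops early."""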
training_started = locals['log_training_started']
done = locals['done']
num_episodes = locals['num_episodes']
if training_started:
log_action_l = locals['log_action_l'] # actions chosen in current episode step
log_action_l.append(locals['action'])
log_q_t_selected_l = locals['log_q_t_selected_l'] #selected actions in batch
log_q_t_selected_l.append(np.mean(locals['log_q_t_selected']))
        log_q_t_targets_l = locals['log_q_t_targets_l'] #q values of actions selected by target network and double Q
log_q_t_targets_l.append(np.mean(locals['log_q_t_targets']))
"""
log_q_t_l = locals['log_q_t_l'] # q values of all actions in a batch
log_q_t_l.append(np.mean(locals['log_q_t'],axis=0))
"""
log_td_errors_l = locals['log_td_errors_l'] # difference between state 1 and next state predictions
log_td_errors_l.append(np.mean(locals['log_td_errors']))
log_errors_l = locals['log_errors_l'] # Huber loss of td errors
log_errors_l.append(np.mean(locals['log_errors']))
log_gradients_l = locals['log_gradients']
log_grad_ratio_l = locals['log_grad_ratio_l']
for grad, var in log_gradients_l:
grad_step = np.linalg.norm(grad*-locals['l_rate'])
var_norm = np.linalg.norm(var)
if var_norm > 0:
wg_ratio = grad_step / var_norm
log_grad_ratio_l.append(wg_ratio)
if done:
rew100 = locals['mean_100ep_reward']
rew50 = locals['mean_50ep_reward']
rew10 = locals['mean_10ep_reward']
logger = globals['logger']
action_counter = sorted(collections.Counter(log_action_l).items())
episode_q_t_selected = np.mean(log_q_t_selected_l) #1x1
episode_q_t_targets = np.mean(log_q_t_targets_l) #1x1
#episode_q_t = np.mean(log_q_t_l,axis=0) #1x2
episode_td_errors = np.mean(log_td_errors_l) #1x1
episode_errors = np.mean(log_errors_l) # 1x1
mean_wg_ratio = np.mean(log_grad_ratio_l) if len(log_grad_ratio_l)>0 else 0
median_wg_ratio = np.median(log_grad_ratio_l) if len(log_grad_ratio_l)>0 else 0
max_wg_ratio = np.max(log_grad_ratio_l) if len(log_grad_ratio_l)>0 else 0
min_wg_ratio = np.min(log_grad_ratio_l) if len(log_grad_ratio_l)>0 else 0
logger.record_tabular("Tmp File", locals['td'])
logger.record_tabular("Actions Count", action_counter)
logger.record_tabular("Mean selected Q",episode_q_t_selected)
logger.record_tabular("Mean selected target Q",episode_q_t_targets)
#logger.record_tabular("Mean Action Q", episode_q_t)
logger.record_tabular("Mean TD Error", episode_td_errors)
logger.record_tabular("Mean Huber Error", episode_errors)
logger.record_tabular("Var/Grad *-lr Mean ratio", mean_wg_ratio)
logger.record_tabular("Var/Grad *-lr Median ratio", median_wg_ratio)
logger.record_tabular("Var/Grad *-lr Max ratio", max_wg_ratio)
logger.record_tabular("Var/Grad *-lr Min ratio", min_wg_ratio)
logger.dump_tabular()
if num_episodes % 100 == 0:
#Write log
path = locals['train_file_path']
print("Writing episode {} log to ".format(num_episodes), path)
with open(path, 'a') as f:
action_count_header = ['action_count{}'.format(i) for i in range(len(action_counter))]
#action_q_header = ['mean_action_q{}'.format(i) for i in range(len(episode_q_t.tolist()))]
headers = ['episode','steps','reward100','reward50','reward10','mean_s_q','mean_t_q','mean_td_error','mean_h_error']
#headers = headers + action_q_header+action_count_header
headers = headers + action_count_header +['mean_wg'] +['median_wg'] +['max_wg'] +['min_wg']
steps = locals['t']
action_counts = [c for i,c in action_counter]
#actions_qs = [q for q in episode_q_t.tolist()]
#output_list = [num_episodes]+[steps]+[rew100]+[rew50]+[rew10]+[episode_q_t_selected]+[episode_q_t_targets]+[episode_td_errors]+[episode_errors]+ actions_qs+action_counts
output_list = [num_episodes] + [steps] + [rew100] + [rew50] + [rew10] + [episode_q_t_selected] + [
episode_q_t_targets] + [episode_td_errors] + [episode_errors] + action_counts +[mean_wg_ratio]+[median_wg_ratio]+[max_wg_ratio]+[min_wg_ratio]
print(headers)
print(output_list)
w = csv.writer(f)
if os.stat(path).st_size == 0:
w.writerow(headers)
w.writerow(output_list)
return False
def main():
    # TODO: Test the prediction code on both environments
np.random.seed(1)
random.seed(1)
data_path="/local/habichta/data_C_int/"
mask_path="/local/habichta//img_C/cavriglia_skymask256.png"
img_path="/local/habichta/img_C/"
train_set_path ="train_list.out"
channels=3
seq_length=3
img_size=84
seq_stride=9
    # TODO: Try larger lr, try mpc guidance, try hard dataset (create one for the large train set ...). Note that naive has a small error, meaning most of its rewards are 0 ... (unlike the toy environment)
    # TODO: test mpc guidance
    # TODO: ask Jan: step size, hard data set
#TODO: prediction code (saliency, t-sne, value func on simple environment)
env = RealCloudEnvironment(data_path,img_path,train_set_path, image_size=img_size,
sequence_length=seq_length, sequence_stride=seq_stride, action_nr=7, action_type=1, ramp_step=0.1, episode_length_train=200,
file="rl_data_sp.csv",load_train_episodes="ep400_200.pkl",mask_path=mask_path,sample_training_episodes=None,exploration_follow="IRR",start_exploration_deviation=0.2,clip_irradiance=False)
#Note: cloud speed can be changes but may also require different ramps.. default, speed of cloud per frame at least 1 pixel in y direction
model = deepq.models.cnn_to_mlp(
convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],
hiddens=[256],
dueling=True,
channels=channels,
seq_length=seq_length,
img_size=img_size
)
act = deepq.learn(
train_file_path='train_log.csv',
env=env,
q_func=model,
lr=2.5e-4,
max_timesteps=2000000,
buffer_size=40000,
initial_exploration=1.0,
exploration_fraction=0.1,
exploration_final_eps=0.01,
train_freq=4,
learning_starts=40000,
target_network_update_freq=1000,
gamma=0.99,
prioritized_replay=True,
callback=logger_callback,
load_cpk=None,
mpc_guidance=None
#"/media/nox/OS/Linux/Documents/Masterarbeit/simple_rl/model1/model"
)
act.save("cloud_model.pkl")
if __name__ == '__main__':
main()
| 37.401961
| 203
| 0.648493
|
61b5468ed2c70fded55fcd518503b8fe50c9f240
| 55,173
|
py
|
Python
|
digesters/confluence/test_confluence_notifications.py
|
paul-hammant/imapdigester
|
7d2d9525d39b1f3f839a219061180971404e4bb8
|
[
"MIT"
] | 25
|
2016-04-04T17:32:47.000Z
|
2022-03-08T02:18:07.000Z
|
digesters/confluence/test_confluence_notifications.py
|
paul-hammant/imapslurper
|
7d2d9525d39b1f3f839a219061180971404e4bb8
|
[
"MIT"
] | null | null | null |
digesters/confluence/test_confluence_notifications.py
|
paul-hammant/imapslurper
|
7d2d9525d39b1f3f839a219061180971404e4bb8
|
[
"MIT"
] | 4
|
2017-01-02T21:03:28.000Z
|
2022-02-22T18:38:44.000Z
|
import sys
from unittest import TestCase
import unittest
import os
from importlib import reload
from mock import Mock, call
from mockextras import stub
sys.path = [os.path.abspath(os.path.join('..', os.pardir))] + sys.path
from digesters.confluence.confluence_notification_digester import ConfluenceNotificationDigester
from digesters.digestion_processor import DigestionProcessor
MAIL_HDR = """From: \"Apache Confluence\" <ph@example.com>
Content-Transfer-Encoding: 8bit
Content-Type: multipart/alternative; boundary="---NOTIFICATION_BOUNDARY-5678"
MIME-Version: 1.0
This is a multi-part message in MIME format.
-----NOTIFICATION_BOUNDARY-5678
Content-Type: text/html; charset="utf-8"
Content-Transfer-Encoding: 8bit
"""
class NotificationsStore(object):
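    """Comparison sentinel: ``__eq__`` records whatever value the mock is
    compared against, letting the test capture the notifications dict passed
    to ``store_as_binary`` and assert on it afterwards."""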
def __init__(self, cls=object):
self._cls = cls
self.notifications = None
def __eq__(self, other):
self.notifications = other
return True
def __ne__(self, other):
return False
def __repr__(self):
return "NotificationsStore(..)"
class TestConfluenceNotifications(TestCase):
def __init__(self, methodName='runTest'):
super(TestConfluenceNotifications, self).__init__(methodName)
reload(sys)
# sys.setdefaultencoding('utf8')
def test_two_related_notifications_can_be_rolled_up(self):
expected_payload = """<html><body><span>You have previously read notifications up to: Apr 09 2016 02:37 AM</span>
<table>
<tr style="background-color: #acf;">
<th>Notifications</th>
</tr>
<tr style="">
<td>
What: Noble Paul edited a page<br/>
Space: solr:<br/>
Page: <a href="https://cwiki.apache.org/confluence/display/solr/Config+API">Config API</a><br/>
Excerpt: Page nodes added: 4, removed: 0, changed: 0
</td>
</tr> <tr style="background-color: #def;">
<td>
What: Hoss Man deleted a comment<br/>
Space: solr:<br/>
Page: <a href="https://cwiki.apache.org/confluence/display/solr/Getting+Started?focusedCommentId=62691549">Re: Getting Started</a><br/>
Excerpt: definitely a great post jadibd.com
</td>
</tr> <tr><td colspan="2" style="border-bottom: 1pt solid red; border-top: 1pt solid red;"><center>^ New Notifications Since You Last Checked ^</center></td></tr> <tr style="">
<td>
What: surya ferdy commented on a page<br/>
Space: solr:<br/>
Page: <a href="https://cwiki.apache.org/confluence/display/solr/Getting+Started?focusedCommentId=62691549">Re: Getting Started</a><br/>
Excerpt: definitely a great post jadibd.com
</td>
</tr>
</table></body></html>"""
notification_store = {}
final_notifications_store = NotificationsStore()
store_writer = Mock()
store_writer.get_from_binary.side_effect = stub(
(call('confluence-notifications'), notification_store),
(call('most-recently-seen'), 1460183824)
)
store_writer.store_as_binary.side_effect = stub(
(call('confluence-notifications', final_notifications_store), True),
(call('most-recently-seen', 1460183824), True)
)
expected_message = ("Subject: Notification Digest: 1 new notification(s)\n"
+ MAIL_HDR + expected_payload + "\n\n-----NOTIFICATION_BOUNDARY-5678")
digest_inbox_proxy = Mock()
digest_inbox_proxy.delete_previous_message.side_effect = stub((call(), True))
digest_inbox_proxy.append.side_effect = stub((call(expected_message), True))
digesters = []
digester = ConfluenceNotificationDigester(store_writer, "confluence@apache.org", "Apache") ## What we are testing
digester.notification_boundary_rand = "-5678" # no random number for the email's notification boundary
digesters.append(digester)
digestion_processor = DigestionProcessor(None, None, digesters, False, "ph@example.com", False, "INBOX")
unmatched_to_move = []
to_delete_from_notification_folder = []
digestion_processor.process_incoming_notification(1234, digesters, COMMENT_ADDED, to_delete_from_notification_folder, unmatched_to_move, False)
digestion_processor.process_incoming_notification(1235, digesters, COMMENT_DELETED, to_delete_from_notification_folder, unmatched_to_move, False)
digestion_processor.process_incoming_notification(1236, digesters, PAGE_EDITED, to_delete_from_notification_folder, unmatched_to_move, False)
digester.rewrite_digest_emails(digest_inbox_proxy, has_previous_message=True,
previously_seen=False, sender_to_implicate="ph@example.com")
self.assertEqual(digest_inbox_proxy.mock_calls, [call.delete_previous_message(), call.append(expected_message)])
calls = store_writer.mock_calls
self.assertEqual(calls, [
call.get_from_binary('confluence-notifications'),
call.get_from_binary('most-recently-seen'),
call.store_as_binary('confluence-notifications', {
1460183824: {'space': 'solr',
'line_here': True,
'who': 'surya ferdy',
'excerpt': 'definitely a great post jadibd.com',
'doc_text': 'Re: Getting Started',
'doc_url': 'https://cwiki.apache.org/confluence/display/solr/Getting+Started?focusedCommentId=62691549',
'event': 'surya ferdy commented on a page'},
1460400060: {'space': 'solr',
'who': 'Hoss Man',
'excerpt': 'definitely a great post jadibd.com',
'doc_text': 'Re: Getting Started',
'doc_url': 'https://cwiki.apache.org/confluence/display/solr/Getting+Started?focusedCommentId=62691549',
'event': 'Hoss Man deleted a comment'},
1460535327: {'space': 'solr',
'who': 'Noble Paul',
'excerpt': 'Page nodes added: 4, removed: 0, changed: 0',
'doc_text': 'Config API',
'doc_url': 'https://cwiki.apache.org/confluence/display/solr/Config+API',
'event': 'Noble Paul edited a page'}
}),
call.store_as_binary('most-recently-seen', 1460183824)])
self.assertEqual(len(unmatched_to_move), 0)
self.assertEqual(str(to_delete_from_notification_folder), "[1234, 1235, 1236]")
self.assertEqual(len(final_notifications_store.notifications), 3)
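# Raw quoted-printable Confluence notification emails used as fixtures by the test above.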
COMMENT_ADDED = """Date: Sat, 9 Apr 2016 06:37:04 +0000
From: "surya ferdy (Confluence)" <confluence@apache.org>
To: <paul_hamm@example.com>
Message-ID: <731544647.1144503.1460183824932.JavaMail.confluence@cwiki-vm4>
Subject: [CONF] Apache Solr Reference Guide > Getting Started
Content-Type: multipart/related;
boundary="----=_Part_1144502_1498730838.1460183824926"
X-ATLASSIAN-NOTIFICATIONS-VERSION: 6.4.0
Auto-Submitted: auto-generated
Precedence: bulk
Content-Type: text/html; charset="UTF-8"
Return-Path: confluence@apache.org
MIME-Version: 1.0
------=_Part_1144502_1498730838.1460183824926
Content-Type: text/html; charset="UTF-8"
Content-Transfer-Encoding: quoted-printable
Content-Disposition: inline
<html><head>
<meta http-equiv=3D"Content-Type" content=3D"text/html; charset=3Dutf-8">=20
<meta name=3D"viewport" content=3D"width=3Ddevice-width, initial-scale=3D1.=
0, maximum-scale=3D1.0">=20
<base href=3D"https://cwiki.apache.org/confluence">=20
<title>Message Title</title> =20
</head>
<body>
<table id=3D"background-table" cellpadding=3D"0" cellspacing=3D"0" width=3D=
"100%" style=3D"border-collapse: collapse; mso-table-lspace: 0pt; mso-table=
-rspace: 0pt; color: #333; background-color: #f5f5f5">=20
<tbody>=20
<tr>=20
<td id=3D"header-pattern-container" style=3D"padding: 0px; border-collapse:=
collapse; padding: 10px 20px">=20
<table id=3D"header-pattern" cellspacing=3D"0" cellpadding=3D"0" border=3D"=
0" style=3D"border-collapse: collapse; mso-table-lspace: 0pt; mso-table-rsp=
ace: 0pt; color: #333">=20
<tbody>=20
<tr>=20
<td id=3D"header-avatar-image-container" valign=3D"top" style=3D"padding: 0=
px; border-collapse: collapse; vertical-align: top; width: 32px; padding-ri=
ght: 9px"><a href=3D"https://cwiki.apache.org/confluence/display/~banyuitua=
ir19?src=3Dmail&src.mail.timestamp=3D1460183824702&src.mail.notific=
ation=3Dcom.atlassian.confluence.plugins.confluence-content-notifications-p=
lugin%3Acomment-created-notification&src.mail.recipient=3D8aa980874ebf4=
c1f0153f8e84cef212a" style=3D"color: #3b73af; text-decoration: none"><img i=
d=3D"header-avatar-image" class=3D"image_fix" src=3D"cid:avatar_c9ee7c28225=
bd8d9b71f70f64bf2c5a4" height=3D"32" width=3D"32" border=3D"0" style=3D"bor=
der-radius: 3px; vertical-align: top"></a></td>
<td id=3D"header-text-container" valign=3D"middle" style=3D"padding: 0px; b=
order-collapse: collapse; vertical-align: middle; font-family: Arial, sans-=
serif; font-size: 14px; line-height: 20px; mso-line-height-rule: exactly; m=
so-text-raise: 1px">surya ferdy <strong>commented</strong> on a page</td>=20
</tr>=20
</tbody>=20
</table> </td>=20
</tr>=20
<!-- End Header pattern -->=20
<tr>=20
<td id=3D"email-content-container" style=3D"padding: 0px; border-collapse: =
collapse; padding: 0 20px">=20
<table id=3D"email-content-table" cellspacing=3D"0" cellpadding=3D"0" borde=
r=3D"0" width=3D"100%" style=3D"border-collapse: collapse; mso-table-lspace=
: 0pt; mso-table-rspace: 0pt; color: #333; border-spacing: 0; border-collap=
se: separate">=20
<tbody>=20
<tr>=20
<td class=3D"email-content-rounded-top mobile-expand" style=3D"padding: 0px=
; border-collapse: collapse; color: #fff; padding: 0 15px 0 16px; height: 1=
5px; background-color: #fff; border-left: 1px solid #ccc; border-top: 1px s=
olid #ccc; border-right: 1px solid #ccc; border-bottom: 0; border-top-right=
-radius: 5px; border-top-left-radius: 5px"> </td>=20
</tr>=20
<tr>=20
<td class=3D"email-content-main mobile-expand" style=3D"padding: 0px; borde=
r-collapse: collapse; border-left: 1px solid #ccc; border-right: 1px solid =
#ccc; border-top: 0; border-bottom: 0; padding: 0 15px 15px 16px; backgroun=
d-color: #fff">=20
<table id=3D"page-title-pattern" cellspacing=3D"0" cellpadding=3D"0" border=
=3D"0" width=3D"100%" style=3D"border-collapse: collapse; mso-table-lspace:=
0pt; mso-table-rspace: 0pt; color: #333">=20
<tbody>=20
<tr>=20
<td id=3D"page-title-pattern-icon-image-container" valign=3D"top" style=3D"=
padding: 0px; border-collapse: collapse; width: 16px; vertical-align: top">=
=20
<table cellspacing=3D"0" cellpadding=3D"0" border=3D"0" style=3D"border-col=
lapse: collapse; mso-table-lspace: 0pt; mso-table-rspace: 0pt; color: #333"=
>=20
<tbody>=20
<tr>=20
<td id=3D"page-title-pattern-icon-image-container-cell" style=3D"padding: 0=
px; border-collapse: collapse; width: 16px; padding: 9px 8px 0px 0px; mso-t=
ext-raise: 5px; mso-line-height-rule: exactly"><a href=3D"https://cwiki.apa=
che.org/confluence/display/solr/Getting+Started?focusedCommentId=3D6269=
1549&src=3Dmail&src.mail.timestamp=3D1460183824702&src.mail.not=
ification=3Dcom.atlassian.confluence.plugins.confluence-content-notificatio=
ns-plugin%3Acomment-created-notification&src.mail.recipient=3D8aa980874=
ebf4c1f0153f8e84cef212a&src.mail.action=3Dview#comment-62691549" title=
=3D"comment icon" style=3D"vertical-align: top;; color: #3b73af; text-decor=
ation: none"><img style=3D"vertical-align: top; display: block;" src=3D"cid=
:comment-icon" alt=3D"comment icon" title=3D"comment icon" height=3D"16" wi=
dth=3D"16" border=3D"0"></a></td>=20
</tr>=20
</tbody>=20
</table> </td>
<td style=3D"vertical-align: top;; padding: 0px; border-collapse: collapse;=
padding-right: 5px; font-size: 20px; line-height: 30px; mso-line-height-ru=
le: exactly" id=3D"page-title-pattern-header-container"><span id=3D"page-ti=
tle-pattern-header" style=3D"font-family: Arial, sans-serif; padding: 0; fo=
nt-size: 20px; line-height: 30px; mso-text-raise: 2px; mso-line-height-rule=
: exactly; vertical-align: middle"><a href=3D"https://cwiki.apache.org/conf=
luence/display/solr/Getting+Started?focusedCommentId=3D62691549&src=
=3Dmail&src.mail.timestamp=3D1460183824702&src.mail.notification=3D=
com.atlassian.confluence.plugins.confluence-content-notifications-plugin%3A=
comment-created-notification&src.mail.recipient=3D8aa980874ebf4c1f0153f=
8e84cef212a&src.mail.action=3Dview#comment-62691549" title=3D"Re: Getti=
ng Started" style=3D"color: #3b73af; text-decoration: none">Re: Getting Sta=
rted</a></span></td>=20
</tr>=20
</tbody>=20
</table> </td>=20
</tr>=20
<tr>=20
<td class=3D"email-content-main mobile-expand" style=3D"padding: 0px; borde=
r-collapse: collapse; border-left: 1px solid #ccc; border-right: 1px solid =
#ccc; border-top: 0; border-bottom: 0; padding: 0 15px 15px 16px; backgroun=
d-color: #fff">=20
<table class=3D"content-excerpt-pattern" cellspacing=3D"0" cellpadding=3D"0=
" border=3D"0" width=3D"100%" style=3D"border-collapse: collapse; mso-table=
-lspace: 0pt; mso-table-rspace: 0pt; color: #333; font-family: Arial, sans-=
serif; font-size: 14px; line-height: 20px; mso-line-height-rule: exactly; m=
so-text-raise: 1px">=20
<tbody>=20
<tr>=20
<td class=3D"content-excerpt-pattern-container mobile-resize-text " style=
=3D"padding: 0px; border-collapse: collapse; padding: 0 0 0 24px"> <p style=
=3D"margin: 10px 0 0 0; margin-top: 0">definitely a great post</p> <p style=
=3D"margin: 10px 0 0 0"> <a href=3D"http://www.jadibd.com" class=3D"externa=
l-link" rel=3D"nofollow" style=3D"color: #3b73af; text-decoration: none">ja=
dibd.com</a><a rel=3D"nofollow" style=3D"color: #3b73af; text-decoration: n=
one"></a> </p> </td>=20
</tr>=20
</tbody>=20
</table> </td>=20
</tr>=20
<tr>=20
<td class=3D"email-content-main mobile-expand action-padding last-row-paddi=
ng" style=3D"padding: 0px; border-collapse: collapse; border-left: 1px soli=
d #ccc; border-right: 1px solid #ccc; border-top: 0; border-bottom: 0; padd=
ing: 0 15px 15px 16px; background-color: #fff; padding-bottom: 10px; paddin=
g-bottom: 10px">=20
<table id=3D"actions-pattern" cellspacing=3D"0" cellpadding=3D"0" border=3D=
"0" width=3D"100%" style=3D"border-collapse: collapse; mso-table-lspace: 0p=
t; mso-table-rspace: 0pt; color: #333; font-family: Arial, sans-serif; font=
-size: 14px; line-height: 20px; mso-line-height-rule: exactly; mso-text-rai=
se: 1px">=20
<tbody>=20
<tr>=20
<td id=3D"actions-pattern-container" valign=3D"middle" style=3D"padding: 0p=
x; border-collapse: collapse; padding: 15px 0 0 24px; vertical-align: middl=
e">=20
<table align=3D"left" style=3D"border-collapse: collapse; mso-table-lspace:=
0pt; mso-table-rspace: 0pt; color: #333">=20
<tbody>=20
<tr>=20
<td class=3D"actions-pattern-action-icon-container" style=3D"padding: 0px; =
border-collapse: collapse; font-family: Arial, sans-serif; font-size: 14px;=
line-height: 20px; mso-line-height-rule: exactly; mso-text-raise: 0px; ver=
tical-align: middle"><a href=3D"https://cwiki.apache.org/confluence/display=
/solr/Getting+Started?replyToComment=3D62691549&src=3Dmail&src.=
mail.timestamp=3D1460183824702&src.mail.notification=3Dcom.atlassian.co=
nfluence.plugins.confluence-content-notifications-plugin%3Acomment-created-=
notification&src.mail.recipient=3D8aa980874ebf4c1f0153f8e84cef212a&=
src.mail.action=3Dreply#comment-62691549" title=3D"Reply Icon" style=3D"col=
or: #3b73af; text-decoration: none"><img class=3D"actions-pattern-action-ic=
on-image" height=3D"16" width=3D"16" border=3D"0" title=3D"Reply Icon" src=
=3D"cid:com.atlassian.confluence.plugins.confluence-email-resources%3Areply=
-to-comment-email-adg-footer-item%3Aicon" alt=3D"Reply Icon" style=3D"verti=
cal-align: middle"></a></td>
<td class=3D"actions-pattern-action-text-container" style=3D"padding: 0px; =
border-collapse: collapse; font-family: Arial, sans-serif; font-size: 14px;=
line-height: 20px; mso-line-height-rule: exactly; mso-text-raise: 4px; pad=
ding-left: 5px; white-space: nowrap"><a href=3D"https://cwiki.apache.org/co=
nfluence/display/solr/Getting+Started?replyToComment=3D62691549&src=
=3Dmail&src.mail.timestamp=3D1460183824702&src.mail.notification=3D=
com.atlassian.confluence.plugins.confluence-content-notifications-plugin%3A=
comment-created-notification&src.mail.recipient=3D8aa980874ebf4c1f0153f=
8e84cef212a&src.mail.action=3Dreply#comment-62691549" title=3D"Reply" s=
tyle=3D"color: #3b73af; text-decoration: none">Reply</a></td>
<td class=3D"actions-pattern-action-bull" style=3D"padding: 0px; border-col=
lapse: collapse; font-family: Arial, sans-serif; font-size: 14px; line-heig=
ht: 20px; mso-line-height-rule: exactly; mso-text-raise: 4px; color: #999; =
padding: 0 5px">=E2=80=A2</td>=20
</tr>=20
</tbody>=20
</table>=20
<table style=3D"border-collapse: collapse; mso-table-lspace: 0pt; mso-table=
-rspace: 0pt; color: #333">=20
<tbody>=20
<tr>=20
<td class=3D"actions-pattern-action-icon-container" style=3D"padding: 0px; =
border-collapse: collapse; font-family: Arial, sans-serif; font-size: 14px;=
line-height: 20px; mso-line-height-rule: exactly; mso-text-raise: 0px; ver=
tical-align: middle"><a href=3D"https://cwiki.apache.org/confluence/plugins=
/likes/like.action?contentId=3D62691549&src=3Dmail&src.mail.timesta=
mp=3D1460183824702&src.mail.notification=3Dcom.atlassian.confluence.plu=
gins.confluence-content-notifications-plugin%3Acomment-created-notification=
&src.mail.recipient=3D8aa980874ebf4c1f0153f8e84cef212a&src.mail.act=
ion=3Dlike" title=3D"Like Icon" style=3D"color: #3b73af; text-decoration: n=
one"><img class=3D"actions-pattern-action-icon-image" height=3D"16" width=
=3D"16" border=3D"0" title=3D"Like Icon" src=3D"cid:com.atlassian.confluenc=
e.plugins.confluence-like%3Aview-email-adg-content-item%3Aicon" alt=3D"Like=
Icon" style=3D"vertical-align: middle"></a></td>
<td class=3D"actions-pattern-action-text-container" style=3D"padding: 0px; =
border-collapse: collapse; font-family: Arial, sans-serif; font-size: 14px;=
line-height: 20px; mso-line-height-rule: exactly; mso-text-raise: 4px; pad=
ding-left: 5px; white-space: nowrap"><a href=3D"https://cwiki.apache.org/co=
nfluence/plugins/likes/like.action?contentId=3D62691549&src=3Dmail&=
src.mail.timestamp=3D1460183824702&src.mail.notification=3Dcom.atlassia=
n.confluence.plugins.confluence-content-notifications-plugin%3Acomment-crea=
ted-notification&src.mail.recipient=3D8aa980874ebf4c1f0153f8e84cef212a&=
amp;src.mail.action=3Dlike" title=3D"Like" style=3D"color: #3b73af; text-de=
coration: none">Like</a></td>=20
</tr>=20
</tbody>=20
</table> </td>=20
</tr>=20
</tbody>=20
</table> </td>=20
</tr>=20
<tr>=20
<td class=3D"email-content-rounded-bottom mobile-expand" style=3D"padding: =
0px; border-collapse: collapse; color: #fff; height: 5px; line-height: 5px;=
padding: 0 15px 0 16px; background-color: #fff; border-bottom-right-radius=
: 5px; border-bottom-left-radius: 5px; border-top: 0; border-left: 1px soli=
d #ccc; border-bottom: 1px solid #ccc; border-right: 1px solid #ccc; mso-li=
ne-height-rule: exactly"> </td>=20
</tr>=20
</tbody>=20
</table> </td>=20
</tr>=20
<tr>=20
<td id=3D"footer-pattern" style=3D"padding: 0px; border-collapse: collapse;=
padding: 12px 20px">=20
<table id=3D"footer-pattern-container" cellspacing=3D"0" cellpadding=3D"0" =
border=3D"0" width=3D"100%" style=3D"border-collapse: collapse; mso-table-l=
space: 0pt; mso-table-rspace: 0pt; color: #333">=20
<tbody>=20
<tr>=20
<td id=3D"footer-pattern-links-container" width=3D"100%" style=3D"padding: =
0px; border-collapse: collapse; color: #999; font-size: 12px; line-height: =
18px; font-family: Arial, sans-serif; mso-line-height-rule: exactly; mso-te=
xt-raise: 2px">=20
<table align=3D"left" style=3D"border-collapse: collapse; mso-table-lspace:=
0pt; mso-table-rspace: 0pt; color: #333; font-size: 12px; line-height: 18p=
x; font-family: Arial, sans-serif; mso-line-height-rule: exactly; mso-text-=
raise: 2px">=20
<tbody>=20
<tr>=20
<td class=3D"footer-pattern-links mobile-resize-text" style=3D"padding: 0px=
; border-collapse: collapse"><a href=3D"https://cwiki.apache.org/confluence=
/users/removespacenotification.action?spaceKey=3Dsolr&src=3Dmail&sr=
c.mail.timestamp=3D1460183824702&src.mail.notification=3Dcom.atlassian.=
confluence.plugins.confluence-content-notifications-plugin%3Acomment-create=
d-notification&src.mail.recipient=3D8aa980874ebf4c1f0153f8e84cef212a&am=
p;src.mail.action=3Dstop-watching" title=3D"" style=3D"color: #3b73af; text=
-decoration: none">Stop watching space</a></td>
<td class=3D"footer-pattern-links-bull" style=3D"padding: 0px; border-colla=
pse: collapse; padding: 0 5px; color: #999">=E2=80=A2</td>=20
</tr>=20
</tbody>=20
</table>=20
<table style=3D"border-collapse: collapse; mso-table-lspace: 0pt; mso-table=
-rspace: 0pt; color: #333; font-size: 12px; line-height: 18px; font-family:=
Arial, sans-serif; mso-line-height-rule: exactly; mso-text-raise: 2px">=20
<tbody>=20
<tr>=20
<td class=3D"footer-pattern-links mobile-resize-text" style=3D"padding: 0px=
; border-collapse: collapse"><a href=3D"https://cwiki.apache.org/confluence=
/users/editmyemailsettings.action?src=3Dmail&src.mail.timestamp=3D14601=
83824702&src.mail.notification=3Dcom.atlassian.confluence.plugins.confl=
uence-content-notifications-plugin%3Acomment-created-notification&src.m=
ail.recipient=3D8aa980874ebf4c1f0153f8e84cef212a&src.mail.action=3Dmana=
ge" title=3D"" style=3D"color: #3b73af; text-decoration: none">Manage notif=
ications</a></td>=20
</tr>=20
</tbody>=20
</table> </td>
<td id=3D"footer-pattern-logo-desktop-container" rowspan=3D"2" valign=3D"to=
p" style=3D"padding: 0px; border-collapse: collapse; padding-left: 20px; ve=
rtical-align: top">=20
<table style=3D"border-collapse: collapse; mso-table-lspace: 0pt; mso-table=
-rspace: 0pt; color: #333">=20
<tbody>=20
<tr>=20
<td id=3D"footer-pattern-logo-desktop-padding" style=3D"padding: 0px; borde=
r-collapse: collapse; padding-top: 3px"><img id=3D"footer-pattern-logo-desk=
top" src=3D"cid:footer-desktop-logo" alt=3D"Confluence logo big" title=3D"C=
onfluence logo big" width=3D"132" height=3D"20" class=3D"image_fix"></td>=20
</tr>=20
</tbody>=20
</table> </td>=20
</tr>=20
<tr>=20
<td id=3D"footer-pattern-text" class=3D"mobile-resize-text" width=3D"100%" =
style=3D"padding: 0px; border-collapse: collapse; color: #999; font-size: 1=
2px; line-height: 18px; font-family: Arial, sans-serif; mso-line-height-rul=
e: exactly; mso-text-raise: 2px; display: none">This message was sent by At=
lassian Confluence 5.8.4<br> <img id=3D"footer-pattern-logo-mobile" src=3D"=
cid:footer-mobile-logo" alt=3D"" title=3D"" width=3D"0" height=3D"0" style=
=3D"display: none; mso-hide: all"></td>=20
</tr>=20
</tbody>=20
</table> </td>=20
</tr>=20
</tbody>=20
</table>=20
<table id=3D"sealed-section" border=3D"0" cellpadding=3D"0" cellspacing=3D"=
0" width=3D"0" style=3D"border-collapse: collapse; mso-table-lspace: 0pt; m=
so-table-rspace: 0pt; color: #333; display: none">=20
<tbody>=20
<tr>=20
<td style=3D"padding: 0px; border-collapse: collapse; border: 0; font-size:=
0px; line-height: 0; mso-line-height-rule: exactly"></td>=20
</tr>=20
</tbody>=20
</table>
</body>
</html>=
------=_Part_1144502_1498730838.1460183824926
"""
COMMENT_DELETED = """Date: Mon, 11 Apr 2016 18:41:00 +0000
From: "Hoss Man (Confluence)" <confluence@apache.org>
To: <paul_hamm@example.com>
Message-ID: <contentId-62691549-1586493860@confluence.localhost>
Subject: [CONF] Apache Solr Reference Guide > Getting Started
Content-Type: multipart/related;
boundary="----=_Part_1152161_850119877.1460400060674"
Auto-Submitted: auto-generated
Precedence: bulk
Return-Path: confluence@apache.org
MIME-Version: 1.0
------=_Part_1152161_850119877.1460400060674
Content-Type: text/html; charset="UTF-8"
Content-Transfer-Encoding: quoted-printable
Content-Disposition: inline
<html><head>
<meta http-equiv=3D"Content-Type" content=3D"text/html; charset=3Dutf-8">=20
<meta name=3D"viewport" content=3D"width=3Ddevice-width, initial-scale=3D1.=
0, maximum-scale=3D1.0">=20
<base href=3D"https://cwiki.apache.org/confluence">=20
<title>Message Title</title> =20
</head>
<body>
<table id=3D"background-table" cellpadding=3D"0" cellspacing=3D"0" width=3D=
"100%" style=3D"border-collapse: collapse; mso-table-lspace: 0pt; mso-table=
-rspace: 0pt; color: #333; background-color: #f5f5f5">=20
<tbody>=20
<tr>=20
<td id=3D"header-pattern-container" style=3D"padding: 0px; border-collapse:=
collapse; padding: 10px 20px">=20
<table id=3D"header-pattern" cellspacing=3D"0" cellpadding=3D"0" border=3D"=
0" style=3D"border-collapse: collapse; mso-table-lspace: 0pt; mso-table-rsp=
ace: 0pt; color: #333">=20
<tbody>=20
<tr>=20
<td id=3D"header-avatar-image-container" valign=3D"top" style=3D"padding: 0=
px; border-collapse: collapse; vertical-align: top; width: 32px; padding-ri=
ght: 9px"><a href=3D"https://cwiki.apache.org/confluence/display/~hossman?s=
rc=3Demail" style=3D"color: #3b73af; text-decoration: none"><img id=3D"head=
er-avatar-image" class=3D"image_fix" src=3D"cid:avatar_571812907fb6dbb71722=
da66de4232ab" height=3D"32" width=3D"32" border=3D"0" style=3D"border-radiu=
s: 3px; vertical-align: top"></a></td>
<td id=3D"header-text-container" valign=3D"middle" style=3D"padding: 0px; b=
order-collapse: collapse; vertical-align: middle; font-family: Arial, sans-=
serif; font-size: 14px; line-height: 20px; mso-line-height-rule: exactly; m=
so-text-raise: 1px">Hoss Man <strong>deleted</strong> a comment</td>=20
</tr>=20
</tbody>=20
</table> </td>=20
</tr>=20
<!-- End Header pattern -->=20
<tr>=20
<td id=3D"email-content-container" style=3D"padding: 0px; border-collapse: =
collapse; padding: 0 20px">=20
<table id=3D"email-content-table" cellspacing=3D"0" cellpadding=3D"0" borde=
r=3D"0" width=3D"100%" style=3D"border-collapse: collapse; mso-table-lspace=
: 0pt; mso-table-rspace: 0pt; color: #333; border-spacing: 0; border-collap=
se: separate">=20
<tbody>=20
<tr>=20
<td class=3D"email-content-rounded-top mobile-expand" style=3D"padding: 0px=
; border-collapse: collapse; color: #fff; padding: 0 15px 0 16px; height: 1=
5px; background-color: #fff; border-left: 1px solid #ccc; border-top: 1px s=
olid #ccc; border-right: 1px solid #ccc; border-bottom: 0; border-top-right=
-radius: 5px; border-top-left-radius: 5px"> </td>=20
</tr>=20
<tr>=20
<td class=3D"email-content-main mobile-expand" style=3D"padding: 0px; borde=
r-collapse: collapse; border-left: 1px solid #ccc; border-right: 1px solid =
#ccc; border-top: 0; border-bottom: 0; padding: 0 15px 15px 16px; backgroun=
d-color: #fff">=20
<table id=3D"page-title-pattern" cellspacing=3D"0" cellpadding=3D"0" border=
=3D"0" width=3D"100%" style=3D"border-collapse: collapse; mso-table-lspace:=
0pt; mso-table-rspace: 0pt; color: #333">=20
<tbody>=20
<tr>=20
<td id=3D"page-title-pattern-icon-image-container" valign=3D"top" style=3D"=
padding: 0px; border-collapse: collapse; width: 16px; vertical-align: top">=
=20
<table cellspacing=3D"0" cellpadding=3D"0" border=3D"0" style=3D"border-col=
lapse: collapse; mso-table-lspace: 0pt; mso-table-rspace: 0pt; color: #333"=
>=20
<tbody>=20
<tr>=20
<td id=3D"page-title-pattern-icon-image-container-cell" style=3D"padding: 0=
px; border-collapse: collapse; width: 16px; padding: 9px 8px 0px 0px; mso-t=
ext-raise: 5px; mso-line-height-rule: exactly"><a href=3D"https://cwiki.apa=
che.org/confluence/display/solr/Getting+Started?focusedCommentId=3D6269=
1549#comment-62691549" title=3D"comment icon" style=3D"vertical-align: top;=
; color: #3b73af; text-decoration: none"><img style=3D"vertical-align: top;=
display: block;" src=3D"cid:comment-icon" alt=3D"comment icon" title=3D"co=
mment icon" height=3D"16" width=3D"16" border=3D"0"></a></td>=20
</tr>=20
</tbody>=20
</table> </td>
<td style=3D"vertical-align: top;; padding: 0px; border-collapse: collapse;=
padding-right: 5px; font-size: 20px; line-height: 30px; mso-line-height-ru=
le: exactly" id=3D"page-title-pattern-header-container"><span id=3D"page-ti=
tle-pattern-header" class=3D"content-deleted-color" style=3D"font-family: A=
rial, sans-serif; padding: 0; font-size: 20px; line-height: 30px; mso-text-=
raise: 2px; mso-line-height-rule: exactly; vertical-align: middle; color: #=
707070"><a href=3D"https://cwiki.apache.org/confluence/display/solr/Getting=
+Started?focusedCommentId=3D62691549#comment-62691549" title=3D"Re: Get=
ting Started" style=3D"color: #3b73af; text-decoration: none; color: #70707=
0">Re: Getting Started</a></span> <span class=3D"content-deleted-lozen=
ge" style=3D"font-family: Arial, sans-serif; background-color: #d04437; col=
or: #fff; border-radius: 3px; font-size: 11px; font-weight: bold; mso-line-=
height-rule: exactly; line-height: 11px; text-align: center; vertical-align=
: middle; border: 2px solid #d04437; border-right-width: 4px; border-left-w=
idth: 4px; mso-text-raise: 2px; mso-border-alt: solid #d04437 2px; mso-bord=
er-alt: solid #d04437 4px">DELETED</span></td>=20
</tr>=20
</tbody>=20
</table> </td>=20
</tr>=20
<tr>=20
<td class=3D"email-content-main mobile-expand last-row-padding" style=3D"pa=
dding: 0px; border-collapse: collapse; border-left: 1px solid #ccc; border-=
right: 1px solid #ccc; border-top: 0; border-bottom: 0; padding: 0 15px 15p=
x 16px; background-color: #fff; padding-bottom: 10px">=20
<table class=3D"content-excerpt-pattern" cellspacing=3D"0" cellpadding=3D"0=
" border=3D"0" width=3D"100%" style=3D"border-collapse: collapse; mso-table=
-lspace: 0pt; mso-table-rspace: 0pt; color: #333; font-family: Arial, sans-=
serif; font-size: 14px; line-height: 20px; mso-line-height-rule: exactly; m=
so-text-raise: 1px">=20
<tbody>=20
<tr>=20
<td class=3D"content-excerpt-pattern-container mobile-resize-text content-d=
eleted-color" style=3D"padding: 0px; border-collapse: collapse; padding: 0 =
0 0 24px; color: #707070"> <p style=3D"margin: 10px 0 0 0; margin-top: 0">d=
efinitely a great post</p> <p style=3D"margin: 10px 0 0 0"> <a href=3D"http=
://www.jadibd.com" class=3D"external-link" rel=3D"nofollow" style=3D"color:=
#3b73af; text-decoration: none; color: #707070">jadibd.com</a><a rel=3D"no=
follow" style=3D"color: #3b73af; text-decoration: none; color: #707070"></a=
> </p> </td>=20
</tr>=20
</tbody>=20
</table> </td>=20
</tr>=20
<tr>=20
<td class=3D"email-content-rounded-bottom mobile-expand" style=3D"padding: =
0px; border-collapse: collapse; color: #fff; height: 5px; line-height: 5px;=
padding: 0 15px 0 16px; background-color: #fff; border-bottom-right-radius=
: 5px; border-bottom-left-radius: 5px; border-top: 0; border-left: 1px soli=
d #ccc; border-bottom: 1px solid #ccc; border-right: 1px solid #ccc; mso-li=
ne-height-rule: exactly"> </td>=20
</tr>=20
</tbody>=20
</table> </td>=20
</tr>=20
<tr>=20
<td id=3D"footer-pattern" style=3D"padding: 0px; border-collapse: collapse;=
padding: 12px 20px">=20
<table id=3D"footer-pattern-container" cellspacing=3D"0" cellpadding=3D"0" =
border=3D"0" width=3D"100%" style=3D"border-collapse: collapse; mso-table-l=
space: 0pt; mso-table-rspace: 0pt; color: #333">=20
<tbody>=20
<tr>=20
<td id=3D"footer-pattern-links-container" width=3D"100%" style=3D"padding: =
0px; border-collapse: collapse; color: #999; font-size: 12px; line-height: =
18px; font-family: Arial, sans-serif; mso-line-height-rule: exactly; mso-te=
xt-raise: 2px">=20
<table align=3D"left" style=3D"border-collapse: collapse; mso-table-lspace:=
0pt; mso-table-rspace: 0pt; color: #333; font-size: 12px; line-height: 18p=
x; font-family: Arial, sans-serif; mso-line-height-rule: exactly; mso-text-=
raise: 2px">=20
<tbody>=20
<tr>=20
<td class=3D"footer-pattern-links mobile-resize-text" style=3D"padding: 0px=
; border-collapse: collapse"><a href=3D"https://cwiki.apache.org/confluence=
/users/removespacenotification.action?spaceKey=3Dsolr" title=3D"" style=3D"=
color: #3b73af; text-decoration: none">Stop watching space</a></td>
<td class=3D"footer-pattern-links-bull" style=3D"padding: 0px; border-colla=
pse: collapse; padding: 0 5px; color: #999">=E2=80=A2</td>=20
</tr>=20
</tbody>=20
</table>=20
<table style=3D"border-collapse: collapse; mso-table-lspace: 0pt; mso-table=
-rspace: 0pt; color: #333; font-size: 12px; line-height: 18px; font-family:=
Arial, sans-serif; mso-line-height-rule: exactly; mso-text-raise: 2px">=20
<tbody>=20
<tr>=20
<td class=3D"footer-pattern-links mobile-resize-text" style=3D"padding: 0px=
; border-collapse: collapse"><a href=3D"https://cwiki.apache.org/confluence=
/users/editmyemailsettings.action" title=3D"" style=3D"color: #3b73af; text=
-decoration: none">Manage notifications</a></td>=20
</tr>=20
</tbody>=20
</table> </td>
<td id=3D"footer-pattern-logo-desktop-container" rowspan=3D"2" valign=3D"to=
p" style=3D"padding: 0px; border-collapse: collapse; padding-left: 20px; ve=
rtical-align: top">=20
<table style=3D"border-collapse: collapse; mso-table-lspace: 0pt; mso-table=
-rspace: 0pt; color: #333">=20
<tbody>=20
<tr>=20
<td id=3D"footer-pattern-logo-desktop-padding" style=3D"padding: 0px; borde=
r-collapse: collapse; padding-top: 3px"><img id=3D"footer-pattern-logo-desk=
top" src=3D"cid:footer-desktop-logo" alt=3D"Confluence logo big" title=3D"C=
onfluence logo big" width=3D"132" height=3D"20" class=3D"image_fix"></td>=20
</tr>=20
</tbody>=20
</table> </td>=20
</tr>=20
<tr>=20
<td id=3D"footer-pattern-text" class=3D"mobile-resize-text" width=3D"100%" =
style=3D"padding: 0px; border-collapse: collapse; color: #999; font-size: 1=
2px; line-height: 18px; font-family: Arial, sans-serif; mso-line-height-rul=
e: exactly; mso-text-raise: 2px; display: none"> This message was sent by A=
tlassian Confluence 5.8.4 <br> <img id=3D"footer-pattern-logo-mobile" src=
=3D"cid:footer-mobile-logo" alt=3D"" title=3D"" width=3D"0" height=3D"0" st=
yle=3D"display: none; mso-hide: all"></td>=20
</tr>=20
</tbody>=20
</table> </td>=20
</tr>=20
</tbody>=20
</table>=20
<table id=3D"sealed-section" border=3D"0" cellpadding=3D"0" cellspacing=3D"=
0" width=3D"0" style=3D"border-collapse: collapse; mso-table-lspace: 0pt; m=
so-table-rspace: 0pt; color: #333; display: none">=20
<tbody>=20
<tr>=20
<td style=3D"padding: 0px; border-collapse: collapse; border: 0; font-size:=
0px; line-height: 0; mso-line-height-rule: exactly"></td>=20
</tr>=20
</tbody>=20
</table>
</body>
</html>=
------=_Part_1152161_850119877.1460400060674--
"""
PAGE_EDITED = """Date: Wed, 13 Apr 2016 08:15:27 +0000
From: "Noble Paul (Confluence)" <confluence@apache.org>
To: <paul_hamm@example.com>
Message-ID: <1334689970.1157938.1460535327926.JavaMail.confluence@cwiki-vm4>
Subject: [CONF] Apache Solr Reference Guide > Config API
Content-Type: multipart/related;
boundary="----=_Part_1157937_100703508.1460535327916"
X-ATLASSIAN-NOTIFICATIONS-VERSION: 6.4.0
Auto-Submitted: auto-generated
Precedence: bulk
Return-Path: confluence@apache.org
MIME-Version: 1.0
------=_Part_1157937_100703508.1460535327916
Content-Type: text/html; charset="UTF-8"
Content-Transfer-Encoding: quoted-printable
Content-Disposition: inline
<html><head>
<meta http-equiv=3D"Content-Type" content=3D"text/html; charset=3Dutf-8">=20
<meta name=3D"viewport" content=3D"width=3Ddevice-width, initial-scale=3D1.=
0, maximum-scale=3D1.0">=20
<base href=3D"https://cwiki.apache.org/confluence">=20
<title>Message Title</title> =20
</head>
<body>
<table id=3D"background-table" cellpadding=3D"0" cellspacing=3D"0" width=3D=
"100%" style=3D"border-collapse: collapse; mso-table-lspace: 0pt; mso-table=
-rspace: 0pt; color: #333; background-color: #f5f5f5">=20
<tbody>=20
<tr>=20
<td id=3D"header-pattern-container" style=3D"padding: 0px; border-collapse:=
collapse; padding: 10px 20px">=20
<table id=3D"header-pattern" cellspacing=3D"0" cellpadding=3D"0" border=3D"=
0" style=3D"border-collapse: collapse; mso-table-lspace: 0pt; mso-table-rsp=
ace: 0pt; color: #333">=20
<tbody>=20
<tr>=20
<td id=3D"header-avatar-image-container" valign=3D"top" style=3D"padding: 0=
px; border-collapse: collapse; vertical-align: top; width: 32px; padding-ri=
ght: 9px"><a href=3D"https://cwiki.apache.org/confluence/display/~noble.pau=
l@gmail.com?src=3Dmail&src.mail.timestamp=3D1460535327722&src.mail.=
notification=3Dcom.atlassian.confluence.plugins.confluence-content-notifica=
tions-plugin%3Apage-edited-notification&src.mail.recipient=3D8aa980874e=
bf4c1f0153f8e84cef212a" style=3D"color: #3b73af; text-decoration: none"><im=
g id=3D"header-avatar-image" class=3D"image_fix" src=3D"cid:avatar_f28ca7b9=
cf4d9bc13b72a24b1bd9230f" height=3D"32" width=3D"32" border=3D"0" style=3D"=
border-radius: 3px; vertical-align: top"></a></td>
<td id=3D"header-text-container" valign=3D"middle" style=3D"padding: 0px; b=
order-collapse: collapse; vertical-align: middle; font-family: Arial, sans-=
serif; font-size: 14px; line-height: 20px; mso-line-height-rule: exactly; m=
so-text-raise: 1px">Noble Paul <strong>edited</strong> a page</td>=20
</tr>=20
</tbody>=20
</table> </td>=20
</tr>=20
<!-- End Header pattern -->=20
<tr>=20
<td id=3D"email-content-container" style=3D"padding: 0px; border-collapse: =
collapse; padding: 0 20px">=20
<table id=3D"email-content-table" cellspacing=3D"0" cellpadding=3D"0" borde=
r=3D"0" width=3D"100%" style=3D"border-collapse: collapse; mso-table-lspace=
: 0pt; mso-table-rspace: 0pt; color: #333; border-spacing: 0; border-collap=
se: separate">=20
<tbody>=20
<tr>=20
<td class=3D"email-content-rounded-top mobile-expand" style=3D"padding: 0px=
; border-collapse: collapse; color: #fff; padding: 0 15px 0 16px; height: 1=
5px; background-color: #fff; border-left: 1px solid #ccc; border-top: 1px s=
olid #ccc; border-right: 1px solid #ccc; border-bottom: 0; border-top-right=
-radius: 5px; border-top-left-radius: 5px"> </td>=20
</tr>=20
<tr>=20
<td class=3D"email-content-main mobile-expand" style=3D"padding: 0px; borde=
r-collapse: collapse; border-left: 1px solid #ccc; border-right: 1px solid =
#ccc; border-top: 0; border-bottom: 0; padding: 0 15px 15px 16px; backgroun=
d-color: #fff">=20
<table id=3D"page-title-pattern" cellspacing=3D"0" cellpadding=3D"0" border=
=3D"0" width=3D"100%" style=3D"border-collapse: collapse; mso-table-lspace:=
0pt; mso-table-rspace: 0pt; color: #333">=20
<tbody>=20
<tr>=20
<td id=3D"page-title-pattern-icon-image-container" valign=3D"top" style=3D"=
padding: 0px; border-collapse: collapse; width: 16px; vertical-align: top">=
=20
<table cellspacing=3D"0" cellpadding=3D"0" border=3D"0" style=3D"border-col=
lapse: collapse; mso-table-lspace: 0pt; mso-table-rspace: 0pt; color: #333"=
>=20
<tbody>=20
<tr>=20
<td id=3D"page-title-pattern-icon-image-container-cell" style=3D"padding: 0=
px; border-collapse: collapse; width: 16px; padding: 9px 8px 0px 0px; mso-t=
ext-raise: 5px; mso-line-height-rule: exactly"><a href=3D"https://cwiki.apa=
che.org/confluence/display/solr/Config+API?src=3Dmail&src.mail.time=
stamp=3D1460535327722&src.mail.notification=3Dcom.atlassian.confluence.=
plugins.confluence-content-notifications-plugin%3Apage-edited-notification&=
amp;src.mail.recipient=3D8aa980874ebf4c1f0153f8e84cef212a&src.mail.acti=
on=3Dview" title=3D"page icon" style=3D"vertical-align: top;; color: #3b73a=
f; text-decoration: none"><img style=3D"vertical-align: top; display: block=
;" src=3D"cid:page-icon" alt=3D"page icon" title=3D"page icon" height=3D"16=
" width=3D"16" border=3D"0"></a></td>=20
</tr>=20
</tbody>=20
</table> </td>
<td style=3D"vertical-align: top;; padding: 0px; border-collapse: collapse;=
padding-right: 5px; font-size: 20px; line-height: 30px; mso-line-height-ru=
le: exactly" id=3D"page-title-pattern-header-container"><span id=3D"page-ti=
tle-pattern-header" style=3D"font-family: Arial, sans-serif; padding: 0; fo=
nt-size: 20px; line-height: 30px; mso-text-raise: 2px; mso-line-height-rule=
: exactly; vertical-align: middle"><a href=3D"https://cwiki.apache.org/conf=
luence/display/solr/Config+API?src=3Dmail&src.mail.timestamp=3D1460=
535327722&src.mail.notification=3Dcom.atlassian.confluence.plugins.conf=
luence-content-notifications-plugin%3Apage-edited-notification&src.mail=
.recipient=3D8aa980874ebf4c1f0153f8e84cef212a&src.mail.action=3Dview" t=
itle=3D"Config API" style=3D"color: #3b73af; text-decoration: none">Config =
API</a></span></td>=20
</tr>=20
</tbody>=20
</table> </td>=20
</tr>=20
<tr>=20
<td class=3D"email-content-main mobile-expand" style=3D"padding: 0px; borde=
r-collapse: collapse; border-left: 1px solid #ccc; border-right: 1px solid =
#ccc; border-top: 0; border-bottom: 0; padding: 0 15px 15px 16px; backgroun=
d-color: #fff">=20
<table class=3D"content-excerpt-pattern" cellspacing=3D"0" cellpadding=3D"0=
" border=3D"0" width=3D"100%" style=3D"border-collapse: collapse; mso-table=
-lspace: 0pt; mso-table-rspace: 0pt; color: #333; font-family: Arial, sans-=
serif; font-size: 14px; line-height: 20px; mso-line-height-rule: exactly; m=
so-text-raise: 1px">=20
<tbody>=20
<tr>=20
<td class=3D"content-excerpt-pattern-container mobile-resize-text " style=
=3D"padding: 0px; border-collapse: collapse; padding: 0 0 0 24px"> <p class=
=3D"diff-context-placeholder" style=3D"margin: 10px 0 0 0; margin-top: 0">.=
..</p>=20
<ul class=3D"diff-block-target" style=3D"margin: 10px 0 0 0">=20
<li> <code style=3D"font-family: monospace">/config</code>: retrieve&n=
bsp;or modify the config. GET to retrieve and POST for executing commands</=
li>=20
<li> <code style=3D"font-family: monospace">/config/overlay</code>: re=
trieve the details in the <code style=3D"font-family: monospace">configover=
lay.json</code> alone</li>=20
<li> <code style=3D"font-family: monospace"><span class=3D"diff-html-added"=
id=3D"added-diff-0" style=3D"font-size: 100%; background-color: #ddfade;">=
/config/params</span></code><span class=3D"diff-html-added" style=3D"font-s=
ize: 100%; background-color: #ddfade;"> : See </span><a href=3D"https://cwi=
ki.apache.org/confluence/display/solr/Request+Parameters+API" rel=
=3D"nofollow" style=3D"color: #3b73af; text-decoration: none"><span class=
=3D"diff-html-added" style=3D"font-size: 100%; background-color: #ddfade;">=
this page</span></a><span class=3D"diff-html-added" style=3D"font-size: 100=
%; background-color: #ddfade;"> for more details</span> </li>=20
</ul> <h2 id=3D"ConfigAPI-Commands" class=3D"diff-block-context" style=3D"m=
argin: 10px 0 0 0; font-size: 20px; font-weight: normal; line-height: 30px;=
margin: 40px 0 0 0">Commands</h2> <p class=3D"diff-block-context" style=3D=
"margin: 10px 0 0 0">This API uses specific commands to tell Solr what prop=
erty or type of property to add to <code style=3D"font-family: monospa=
ce">configoverlay.json</code>. The commands are passed as part of the data =
sent with the request.</p> <p class=3D"diff-context-placeholder" style=3D"m=
argin: 10px 0 0 0; margin-top: 0">...</p> </td>=20
</tr>=20
</tbody>=20
</table> </td>=20
</tr>=20
<tr>=20
<td class=3D"email-content-main mobile-expand action-padding last-row-paddi=
ng" style=3D"padding: 0px; border-collapse: collapse; border-left: 1px soli=
d #ccc; border-right: 1px solid #ccc; border-top: 0; border-bottom: 0; padd=
ing: 0 15px 15px 16px; background-color: #fff; padding-bottom: 10px; paddin=
g-bottom: 10px">=20
<table id=3D"actions-pattern" cellspacing=3D"0" cellpadding=3D"0" border=3D=
"0" width=3D"100%" style=3D"border-collapse: collapse; mso-table-lspace: 0p=
t; mso-table-rspace: 0pt; color: #333; font-family: Arial, sans-serif; font=
-size: 14px; line-height: 20px; mso-line-height-rule: exactly; mso-text-rai=
se: 1px">=20
<tbody>=20
<tr>=20
<td id=3D"actions-pattern-container" valign=3D"middle" style=3D"padding: 0p=
x; border-collapse: collapse; padding: 15px 0 0 24px; vertical-align: middl=
e">=20
<table align=3D"left" style=3D"border-collapse: collapse; mso-table-lspace:=
0pt; mso-table-rspace: 0pt; color: #333">=20
<tbody>=20
<tr>=20
<td class=3D"actions-pattern-action-icon-container" style=3D"padding: 0px; =
border-collapse: collapse; font-family: Arial, sans-serif; font-size: 14px;=
line-height: 20px; mso-line-height-rule: exactly; mso-text-raise: 0px; ver=
tical-align: middle"><a href=3D"https://cwiki.apache.org/confluence/display=
/solr/Config+API?src=3Dmail&src.mail.timestamp=3D1460535327722&=
src.mail.notification=3Dcom.atlassian.confluence.plugins.confluence-content=
-notifications-plugin%3Apage-edited-notification&src.mail.recipient=3D8=
aa980874ebf4c1f0153f8e84cef212a&src.mail.action=3Dview" title=3D"View p=
age Icon" style=3D"color: #3b73af; text-decoration: none"><img class=3D"act=
ions-pattern-action-icon-image" height=3D"16" width=3D"16" border=3D"0" tit=
le=3D"View page Icon" src=3D"cid:com.atlassian.confluence.plugins.confluenc=
e-email-resources%3Aview-page-email-adg-footer-item%3Aicon" alt=3D"View pag=
e Icon" style=3D"vertical-align: middle"></a></td>
<td class=3D"actions-pattern-action-text-container" style=3D"padding: 0px; =
border-collapse: collapse; font-family: Arial, sans-serif; font-size: 14px;=
line-height: 20px; mso-line-height-rule: exactly; mso-text-raise: 4px; pad=
ding-left: 5px; white-space: nowrap"><a href=3D"https://cwiki.apache.org/co=
nfluence/display/solr/Config+API?src=3Dmail&src.mail.timestamp=3D14=
60535327722&src.mail.notification=3Dcom.atlassian.confluence.plugins.co=
nfluence-content-notifications-plugin%3Apage-edited-notification&src.ma=
il.recipient=3D8aa980874ebf4c1f0153f8e84cef212a&src.mail.action=3Dview"=
title=3D"View page" style=3D"color: #3b73af; text-decoration: none">View p=
age</a></td>
<td class=3D"actions-pattern-action-bull" style=3D"padding: 0px; border-col=
lapse: collapse; font-family: Arial, sans-serif; font-size: 14px; line-heig=
ht: 20px; mso-line-height-rule: exactly; mso-text-raise: 4px; color: #999; =
padding: 0 5px">=E2=80=A2</td>=20
</tr>=20
</tbody>=20
</table>=20
<table align=3D"left" style=3D"border-collapse: collapse; mso-table-lspace:=
0pt; mso-table-rspace: 0pt; color: #333">=20
<tbody>=20
<tr>=20
<td class=3D"actions-pattern-action-icon-container" style=3D"padding: 0px; =
border-collapse: collapse; font-family: Arial, sans-serif; font-size: 14px;=
line-height: 20px; mso-line-height-rule: exactly; mso-text-raise: 0px; ver=
tical-align: middle"><a href=3D"https://cwiki.apache.org/confluence/display=
/solr/Config+API?showComments=3Dtrue&showCommentArea=3Dtrue&src=
=3Dmail&src.mail.timestamp=3D1460535327722&src.mail.notification=3D=
com.atlassian.confluence.plugins.confluence-content-notifications-plugin%3A=
page-edited-notification&src.mail.recipient=3D8aa980874ebf4c1f0153f8e84=
cef212a&src.mail.action=3Dcomment#addcomment" title=3D"Add comment Icon=
" style=3D"color: #3b73af; text-decoration: none"><img class=3D"actions-pat=
tern-action-icon-image" height=3D"16" width=3D"16" border=3D"0" title=3D"Ad=
d comment Icon" src=3D"cid:com.atlassian.confluence.plugins.confluence-emai=
l-resources%3Aadd-comment-to-content-email-adg-footer-item%3Aicon" alt=3D"A=
dd comment Icon" style=3D"vertical-align: middle"></a></td>
<td class=3D"actions-pattern-action-text-container" style=3D"padding: 0px; =
border-collapse: collapse; font-family: Arial, sans-serif; font-size: 14px;=
line-height: 20px; mso-line-height-rule: exactly; mso-text-raise: 4px; pad=
ding-left: 5px; white-space: nowrap"><a href=3D"https://cwiki.apache.org/co=
nfluence/display/solr/Config+API?showComments=3Dtrue&showCommentAre=
a=3Dtrue&src=3Dmail&src.mail.timestamp=3D1460535327722&src.mail=
.notification=3Dcom.atlassian.confluence.plugins.confluence-content-notific=
ations-plugin%3Apage-edited-notification&src.mail.recipient=3D8aa980874=
ebf4c1f0153f8e84cef212a&src.mail.action=3Dcomment#addcomment" title=3D"=
Add comment" style=3D"color: #3b73af; text-decoration: none">Add comment</a=
></td>
<td class=3D"actions-pattern-action-bull" style=3D"padding: 0px; border-col=
lapse: collapse; font-family: Arial, sans-serif; font-size: 14px; line-heig=
ht: 20px; mso-line-height-rule: exactly; mso-text-raise: 4px; color: #999; =
padding: 0 5px">=E2=80=A2</td>=20
</tr>=20
</tbody>=20
</table>=20
<table style=3D"border-collapse: collapse; mso-table-lspace: 0pt; mso-table=
-rspace: 0pt; color: #333">=20
<tbody>=20
<tr>=20
<td class=3D"actions-pattern-action-icon-container" style=3D"padding: 0px; =
border-collapse: collapse; font-family: Arial, sans-serif; font-size: 14px;=
line-height: 20px; mso-line-height-rule: exactly; mso-text-raise: 0px; ver=
tical-align: middle"><a href=3D"https://cwiki.apache.org/confluence/plugins=
/likes/like.action?contentId=3D50857507&src=3Dmail&src.mail.timesta=
mp=3D1460535327722&src.mail.notification=3Dcom.atlassian.confluence.plu=
gins.confluence-content-notifications-plugin%3Apage-edited-notification&=
;src.mail.recipient=3D8aa980874ebf4c1f0153f8e84cef212a&src.mail.action=
=3Dlike" title=3D"Like Icon" style=3D"color: #3b73af; text-decoration: none=
"><img class=3D"actions-pattern-action-icon-image" height=3D"16" width=3D"1=
6" border=3D"0" title=3D"Like Icon" src=3D"cid:com.atlassian.confluence.plu=
gins.confluence-like%3Aview-email-adg-content-item%3Aicon" alt=3D"Like Icon=
" style=3D"vertical-align: middle"></a></td>
<td class=3D"actions-pattern-action-text-container" style=3D"padding: 0px; =
border-collapse: collapse; font-family: Arial, sans-serif; font-size: 14px;=
line-height: 20px; mso-line-height-rule: exactly; mso-text-raise: 4px; pad=
ding-left: 5px; white-space: nowrap"><a href=3D"https://cwiki.apache.org/co=
nfluence/plugins/likes/like.action?contentId=3D50857507&src=3Dmail&=
src.mail.timestamp=3D1460535327722&src.mail.notification=3Dcom.atlassia=
n.confluence.plugins.confluence-content-notifications-plugin%3Apage-edited-=
notification&src.mail.recipient=3D8aa980874ebf4c1f0153f8e84cef212a&=
src.mail.action=3Dlike" title=3D"Like" style=3D"color: #3b73af; text-decora=
tion: none">Like</a></td>=20
</tr>=20
</tbody>=20
</table> </td>=20
</tr>=20
</tbody>=20
</table> </td>=20
</tr>=20
<tr>=20
<td class=3D"email-content-rounded-bottom mobile-expand" style=3D"padding: =
0px; border-collapse: collapse; color: #fff; height: 5px; line-height: 5px;=
padding: 0 15px 0 16px; background-color: #fff; border-bottom-right-radius=
: 5px; border-bottom-left-radius: 5px; border-top: 0; border-left: 1px soli=
d #ccc; border-bottom: 1px solid #ccc; border-right: 1px solid #ccc; mso-li=
ne-height-rule: exactly"> </td>=20
</tr>=20
</tbody>=20
</table> </td>=20
</tr>=20
<tr>=20
<td id=3D"footer-pattern" style=3D"padding: 0px; border-collapse: collapse;=
padding: 12px 20px">=20
<table id=3D"footer-pattern-container" cellspacing=3D"0" cellpadding=3D"0" =
border=3D"0" width=3D"100%" style=3D"border-collapse: collapse; mso-table-l=
space: 0pt; mso-table-rspace: 0pt; color: #333">=20
<tbody>=20
<tr>=20
<td id=3D"footer-pattern-links-container" width=3D"100%" style=3D"padding: =
0px; border-collapse: collapse; color: #999; font-size: 12px; line-height: =
18px; font-family: Arial, sans-serif; mso-line-height-rule: exactly; mso-te=
xt-raise: 2px">=20
<table align=3D"left" style=3D"border-collapse: collapse; mso-table-lspace:=
0pt; mso-table-rspace: 0pt; color: #333; font-size: 12px; line-height: 18p=
x; font-family: Arial, sans-serif; mso-line-height-rule: exactly; mso-text-=
raise: 2px">=20
<tbody>=20
<tr>=20
<td class=3D"footer-pattern-links mobile-resize-text" style=3D"padding: 0px=
; border-collapse: collapse"><a href=3D"https://cwiki.apache.org/confluence=
/users/removespacenotification.action?spaceKey=3Dsolr&src=3Dmail&sr=
c.mail.timestamp=3D1460535327722&src.mail.notification=3Dcom.atlassian.=
confluence.plugins.confluence-content-notifications-plugin%3Apage-edited-no=
tification&src.mail.recipient=3D8aa980874ebf4c1f0153f8e84cef212a&sr=
c.mail.action=3Dstop-watching" title=3D"" style=3D"color: #3b73af; text-dec=
oration: none">Stop watching space</a></td>
<td class=3D"footer-pattern-links-bull" style=3D"padding: 0px; border-colla=
pse: collapse; padding: 0 5px; color: #999">=E2=80=A2</td>=20
</tr>=20
</tbody>=20
</table>=20
<table style=3D"border-collapse: collapse; mso-table-lspace: 0pt; mso-table=
-rspace: 0pt; color: #333; font-size: 12px; line-height: 18px; font-family:=
Arial, sans-serif; mso-line-height-rule: exactly; mso-text-raise: 2px">=20
<tbody>=20
<tr>=20
<td class=3D"footer-pattern-links mobile-resize-text" style=3D"padding: 0px=
; border-collapse: collapse"><a href=3D"https://cwiki.apache.org/confluence=
/users/editmyemailsettings.action?src=3Dmail&src.mail.timestamp=3D14605=
35327722&src.mail.notification=3Dcom.atlassian.confluence.plugins.confl=
uence-content-notifications-plugin%3Apage-edited-notification&src.mail.=
recipient=3D8aa980874ebf4c1f0153f8e84cef212a&src.mail.action=3Dmanage" =
title=3D"" style=3D"color: #3b73af; text-decoration: none">Manage notificat=
ions</a></td>=20
</tr>=20
</tbody>=20
</table> </td>
<td id=3D"footer-pattern-logo-desktop-container" rowspan=3D"2" valign=3D"to=
p" style=3D"padding: 0px; border-collapse: collapse; padding-left: 20px; ve=
rtical-align: top">=20
<table style=3D"border-collapse: collapse; mso-table-lspace: 0pt; mso-table=
-rspace: 0pt; color: #333">=20
<tbody>=20
<tr>=20
<td id=3D"footer-pattern-logo-desktop-padding" style=3D"padding: 0px; borde=
r-collapse: collapse; padding-top: 3px"><img id=3D"footer-pattern-logo-desk=
top" src=3D"cid:footer-desktop-logo" alt=3D"Confluence logo big" title=3D"C=
onfluence logo big" width=3D"132" height=3D"20" class=3D"image_fix"></td>=20
</tr>=20
</tbody>=20
</table> </td>=20
</tr>=20
<tr>=20
<td id=3D"footer-pattern-text" class=3D"mobile-resize-text" width=3D"100%" =
style=3D"padding: 0px; border-collapse: collapse; color: #999; font-size: 1=
2px; line-height: 18px; font-family: Arial, sans-serif; mso-line-height-rul=
e: exactly; mso-text-raise: 2px; display: none">This message was sent by At=
lassian Confluence 5.8.4<br> <img id=3D"footer-pattern-logo-mobile" src=3D"=
cid:footer-mobile-logo" alt=3D"" title=3D"" width=3D"0" height=3D"0" style=
=3D"display: none; mso-hide: all"></td>=20
</tr>=20
</tbody>=20
</table> </td>=20
</tr>=20
</tbody>=20
</table>=20
<table id=3D"sealed-section" border=3D"0" cellpadding=3D"0" cellspacing=3D"=
0" width=3D"0" style=3D"border-collapse: collapse; mso-table-lspace: 0pt; m=
so-table-rspace: 0pt; color: #333; display: none">=20
<tbody>=20
<tr>=20
<td style=3D"padding: 0px; border-collapse: collapse; border: 0; font-size:=
0px; line-height: 0; mso-line-height-rule: exactly"></td>=20
</tr>=20
</tbody>=20
</table>
</body>
</html>=
------=_Part_1157937_100703508.1460535327916--"""
if __name__ == '__main__':
unittest.main()
| 49.52693
| 196
| 0.726587
|
ec8898c4aac40337edcfc977a0a6bfbf011ed9bb
| 1,858
|
py
|
Python
|
reconstruct.py
|
robclouth/RAVE
|
a09dec0370b776038a001d8b03944181a20047d3
|
[
"MIT"
] | 2
|
2022-01-29T20:09:19.000Z
|
2022-01-31T22:50:42.000Z
|
reconstruct.py
|
gnhdnb/RAVE
|
4cf6cbd8706a7acbb0882be09d2a2d5539cf713b
|
[
"MIT"
] | null | null | null |
reconstruct.py
|
gnhdnb/RAVE
|
4cf6cbd8706a7acbb0882be09d2a2d5539cf713b
|
[
"MIT"
] | null | null | null |
import torch
torch.set_grad_enabled(False)
from tqdm import tqdm
from rave import RAVE
from rave.core import search_for_run
from effortless_config import Config
from os import path, makedirs, environ
from pathlib import Path
import librosa as li
import GPUtil as gpu
import soundfile as sf
class args(Config):
CKPT = None # PATH TO YOUR PRETRAINED CHECKPOINT
WAV_FOLDER = None # PATH TO YOUR WAV FOLDER
OUT = "./reconstruction/"
args.parse_args()
# GPU DISCOVERY
CUDA = gpu.getAvailable(maxMemory=.05)
if len(CUDA):
environ["CUDA_VISIBLE_DEVICES"] = str(CUDA[0])
use_gpu = 1
elif torch.cuda.is_available():
print("Cuda is available but no fully free GPU found.")
print("Reconstruction may be slower due to concurrent processes.")
use_gpu = 1
else:
print("No GPU found.")
use_gpu = 0
device = torch.device("cuda:0" if use_gpu else "cpu")
# LOAD RAVE
rave = RAVE.load_from_checkpoint(
search_for_run(args.CKPT),
strict=False,
).eval().to(device)
# COMPUTE LATENT COMPRESSION RATIO
x = torch.randn(1, 1, 2**14).to(device)
z = rave.encode(x)
ratio = x.shape[-1] // z.shape[-1]
# SEARCH FOR WAV FILES
audios = tqdm(list(Path(args.WAV_FOLDER).rglob("*.wav")))
# RECONSTRUCTION
makedirs(args.OUT, exist_ok=True)
for audio in audios:
audio_name = path.splitext(path.basename(audio))[0]
audios.set_description(audio_name)
# LOAD AUDIO TO TENSOR
x, sr = li.load(audio, sr=rave.sr)
x = torch.from_numpy(x).reshape(1, 1, -1).float().to(device)
# PAD AUDIO
n_sample = x.shape[-1]
pad = (ratio - (n_sample % ratio)) % ratio
x = torch.nn.functional.pad(x, (0, pad))
# ENCODE / DECODE
y = rave.decode(rave.encode(x))
y = y.reshape(-1).cpu().numpy()[:n_sample]
# WRITE AUDIO
sf.write(path.join(args.OUT, f"{audio_name}_reconstruction.wav"), y, sr)
| 23.518987
| 76
| 0.68676
|
a0d234386d182fd417ef1d4ddb5f2b61358e5f3e
| 216
|
py
|
Python
|
packageName/module.py
|
GaetanDesrues/EmptyPythonPackage
|
35caa1430666f1368c0f3d8f7945370c093b9b50
|
[
"MIT"
] | null | null | null |
packageName/module.py
|
GaetanDesrues/EmptyPythonPackage
|
35caa1430666f1368c0f3d8f7945370c093b9b50
|
[
"MIT"
] | null | null | null |
packageName/module.py
|
GaetanDesrues/EmptyPythonPackage
|
35caa1430666f1368c0f3d8f7945370c093b9b50
|
[
"MIT"
] | null | null | null |
class Class:
""" Documentation
"""
def __init__(self):
pass
def function(self, arg: int):
""" Simple function
:param int arg: Function argument
"""
pass
| 15.428571
| 45
| 0.5
|
64022e6fe5fd9c5a7bf48e48e5b0fc7d6562eb96
| 2,856
|
py
|
Python
|
connect_four/evaluation/incremental_victor/incremental_victor_evaluator.py
|
rpachauri/connect4
|
6caf6965afaaff6883193ac295c6ac5b1f4e9c4a
|
[
"MIT"
] | null | null | null |
connect_four/evaluation/incremental_victor/incremental_victor_evaluator.py
|
rpachauri/connect4
|
6caf6965afaaff6883193ac295c6ac5b1f4e9c4a
|
[
"MIT"
] | null | null | null |
connect_four/evaluation/incremental_victor/incremental_victor_evaluator.py
|
rpachauri/connect4
|
6caf6965afaaff6883193ac295c6ac5b1f4e9c4a
|
[
"MIT"
] | null | null | null |
from connect_four.envs import ConnectFourEnv
from connect_four.evaluation import ProofStatus, NodeType
from connect_four.evaluation.simple_evaluator import SimpleEvaluator
from connect_four.evaluation.board import Board
from connect_four.evaluation.incremental_victor.graph.graph_manager import GraphManager
from connect_four.evaluation.incremental_victor.solution.victor_solution_manager import VictorSolutionManager
from connect_four.problem import ConnectFourGroupManager
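# Evaluator that layers incremental Victor-style proof/disproof analysis on top of SimpleEvaluator,
# keeping its graph of problems and solutions in sync as moves are played and undone.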
class IncrementalVictor(SimpleEvaluator):
def __init__(self, model: ConnectFourEnv):
super().__init__(model=model)
problem_manager = ConnectFourGroupManager(env_variables=self.model.env_variables)
solution_manager = VictorSolutionManager(env_variables=self.model.env_variables)
self.graph_manager = GraphManager(
player=self.model.env_variables.player_turn,
problem_manager=problem_manager,
solution_manager=solution_manager,
)
self.reached_terminal_state = False
def move(self, action: int):
"""
Modifies:
- This Evaluator's model will be at the resulting state.
Args:
action (int): an action that can be applied in the current state of this evaluator's model environment.
"""
super().move(action=action)
if self.done:
self.reached_terminal_state = True
else:
board = Board(env_variables=self.model.env_variables)
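# Work out the row the piece just landed in: one below the new playable square, or the top row (0) if the column is now full.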
row = 0
playable_square = board.playable_square(col=action)
if playable_square is not None:
row = playable_square.row + 1
self.model.render()
self.graph_manager.move(row=row, col=action)
def undo_move(self):
super().undo_move()
if self.reached_terminal_state:
self.reached_terminal_state = False
else:
self.graph_manager.undo_move()
def evaluate(self) -> ProofStatus:
proof_status = super().evaluate()
if proof_status != ProofStatus.Unknown:
return proof_status
evaluation = self.graph_manager.evaluate()
# board = Board(env_variables=self.model.env_variables)
# evaluation = evaluator.evaluate(board=board)
if evaluation is not None:
if self.node_type == NodeType.OR:
# If it is White's turn and the evaluation is not None,
# then Black has found a way to disprove this node.
return ProofStatus.Disproven
else:
# If it is Black's turn and the evaluation is not None,
# then White has found a way to prove this node.
return ProofStatus.Proven
# If the evaluation is None, then this state is unknown.
return ProofStatus.Unknown
| 38.594595
| 115
| 0.667017
|
1f92fd5433c046e175cf44e37cbe1bb6608ecb0e
| 10,497
|
py
|
Python
|
saleor/dashboard/drawercash/views.py
|
glosoftgroup/KahawaHardware
|
893e94246583addf41c3bb0d58d2ce6bcd233c4f
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/dashboard/drawercash/views.py
|
glosoftgroup/KahawaHardware
|
893e94246583addf41c3bb0d58d2ce6bcd233c4f
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/dashboard/drawercash/views.py
|
glosoftgroup/KahawaHardware
|
893e94246583addf41c3bb0d58d2ce6bcd233c4f
|
[
"BSD-3-Clause"
] | null | null | null |
from django.shortcuts import get_object_or_404, redirect, render_to_response
from django.template.response import TemplateResponse
from django.http import HttpResponse
from django.core.paginator import Paginator, PageNotAnInteger, InvalidPage, EmptyPage
from django.db.models import Q
from ..views import staff_member_required
from ...userprofile.models import User, UserTrail
from ...sale.models import Terminal, DrawerCash, TerminalHistoryEntry
from ...decorators import permission_decorator, user_trail
import logging
debug_logger = logging.getLogger('debug_logger')
info_logger = logging.getLogger('info_logger')
error_logger = logging.getLogger('error_logger')
@staff_member_required
def transactions(request):
try:
transactions = DrawerCash.objects.all().order_by('-id')
page = request.GET.get('page', 1)
paginator = Paginator(transactions, 10)
try:
transactions = paginator.page(page)
except PageNotAnInteger:
transactions = paginator.page(1)
except InvalidPage:
transactions = paginator.page(1)
except EmptyPage:
transactions = paginator.page(paginator.num_pages)
user_trail(request.user.name, 'accessed transaction', 'view')
info_logger.info('User: ' + str(request.user.name) + ' accessed transactions')
return TemplateResponse(request, 'dashboard/cashmovement/transactions.html',{'transactions':transactions, 'pn': paginator.num_pages})
except TypeError as e:
error_logger.error(e)
return TemplateResponse(request, 'dashboard/cashmovement/transactions.html', {'transactions':transactions, 'pn': paginator.num_pages})
def transaction_pagination(request):
page = int(request.GET.get('page', 1))
list_sz = request.GET.get('size')
p2_sz = request.GET.get('psize')
select_sz = request.GET.get('select_size')
transactions = DrawerCash.objects.all().order_by('-id')
if list_sz:
paginator = Paginator(transactions, int(list_sz))
transactions = paginator.page(page)
return TemplateResponse(request, 'dashboard/cashmovement/pagination/p2.html',
{'transactions':transactions, 'pn': paginator.num_pages, 'sz': list_sz, 'gid': 0})
else:
paginator = Paginator(transactions, 10)
if p2_sz:
paginator = Paginator(transactions, int(p2_sz))
transactions = paginator.page(page)
return TemplateResponse(request, 'dashboard/cashmovement/pagination/paginate.html', {"transactions":transactions})
try:
transactions = paginator.page(page)
except PageNotAnInteger:
transactions = paginator.page(1)
except InvalidPage:
transactions = paginator.page(1)
except EmptyPage:
transactions = paginator.page(paginator.num_pages)
return TemplateResponse(request, 'dashboard/cashmovement/pagination/paginate.html', {"transactions":transactions})
@staff_member_required
def transaction_search(request):
if request.is_ajax():
page = request.GET.get('page', 1)
list_sz = request.GET.get('size', 10)
p2_sz = request.GET.get('psize')
q = request.GET.get('q')
if list_sz is None:
sz = 10
else:
sz = list_sz
if q is not None:
queryset_list = DrawerCash.objects.filter(
Q(user__name__icontains=q) |
Q(user__email__icontains=q) |
Q(terminal__terminal_name__icontains=q) |
Q(trans_type__icontains=q) |
Q(manager__name__icontains=q) |
Q(manager__email__icontains=q)
).order_by('-id')
paginator = Paginator(queryset_list, 10)
try:
queryset_list = paginator.page(page)
except PageNotAnInteger:
queryset_list = paginator.page(1)
except InvalidPage:
queryset_list = paginator.page(1)
except EmptyPage:
queryset_list = paginator.page(paginator.num_pages)
transactions = queryset_list
if p2_sz:
transactions = paginator.page(page)
return TemplateResponse(request, 'dashboard/cashmovement/pagination/paginate.html', {"transactions":transactions})
return TemplateResponse(request, 'dashboard/cashmovement/pagination/search.html',
{"transactions":transactions, 'pn': paginator.num_pages, 'sz': sz, 'q': q})
@staff_member_required
@permission_decorator('sale.view_terminal')
def terminals(request):
try:
users = Terminal.objects.all().order_by('-id')
page = request.GET.get('page', 1)
paginator = Paginator(users, 10)
try:
users = paginator.page(page)
except PageNotAnInteger:
users = paginator.page(1)
except InvalidPage:
users = paginator.page(1)
except EmptyPage:
users = paginator.page(paginator.num_pages)
user_trail(request.user.name, 'accessed Terminals', 'view')
info_logger.info('User: ' + str(request.user.name) + ' accessed terminals')
return TemplateResponse(request, 'dashboard/terminal/terminals.html',{'users': users, 'pn': paginator.num_pages})
except TypeError as e:
error_logger.error(e)
return TemplateResponse(request, 'dashboard/terminal/terminals.html', {'users': users, 'pn': paginator.num_pages})
def terminal_pagination(request):
page = int(request.GET.get('page', 1))
list_sz = request.GET.get('size')
p2_sz = request.GET.get('psize')
select_sz = request.GET.get('select_size')
users = Terminal.objects.all().order_by('-id')
if list_sz:
paginator = Paginator(users, int(list_sz))
users = paginator.page(page)
return TemplateResponse(request, 'dashboard/terminal/pagination/p2.html',
{'users':users, 'pn': paginator.num_pages, 'sz': list_sz, 'gid': 0})
else:
paginator = Paginator(users, 10)
if p2_sz:
paginator = Paginator(users, int(p2_sz))
users = paginator.page(page)
return TemplateResponse(request, 'dashboard/terminal/pagination/paginate.html', {"users":users})
try:
users = paginator.page(page)
except PageNotAnInteger:
users = paginator.page(1)
except InvalidPage:
users = paginator.page(1)
except EmptyPage:
users = paginator.page(paginator.num_pages)
return TemplateResponse(request, 'dashboard/terminal/pagination/paginate.html', {"users":users})
@staff_member_required
def terminal_search(request):
if request.is_ajax():
page = request.GET.get('page', 1)
list_sz = request.GET.get('size', 10)
p2_sz = request.GET.get('psize')
q = request.GET.get('q')
if list_sz is None:
sz = 10
else:
sz = list_sz
if q is not None:
queryset_list = Terminal.objects.filter(
Q(terminal_name__icontains=q)|
Q(terminal_number__icontains=q)
).order_by('-id')
paginator = Paginator(queryset_list, 10)
try:
queryset_list = paginator.page(page)
except PageNotAnInteger:
queryset_list = paginator.page(1)
except InvalidPage:
queryset_list = paginator.page(1)
except EmptyPage:
queryset_list = paginator.page(paginator.num_pages)
users = queryset_list
if p2_sz:
users = paginator.page(page)
return TemplateResponse(request, 'dashboard/terminal/pagination/paginate.html', {"users":users})
return TemplateResponse(request, 'dashboard/terminal/pagination/search.html',
{"users":users, 'pn': paginator.num_pages, 'sz': sz, 'q': q})
@staff_member_required
@permission_decorator('sale.add_terminal')
def terminal_add(request):
try:
user_trail(request.user.name, 'accessed add terminal page', 'view')
info_logger.info('User: ' + str(request.user.name) + ' accessed terminal add page')
return TemplateResponse(request, 'dashboard/terminal/add_terminal.html',{})
except TypeError as e:
error_logger.error(e)
return HttpResponse('error accessing add terminal page')
@staff_member_required
def terminal_process(request):
user = Terminal.objects.all()
if request.method == 'POST':
terminal_name = request.POST.get('name')
terminal_number = request.POST.get('nid')
new_user = Terminal.objects.create(
terminal_name = terminal_name,
terminal_number = terminal_number,
)
try:
new_user.save()
user_trail(request.user.name, 'created Terminal: ' + str(terminal_name), 'add')
info_logger.info('User: ' + str(request.user.name) + ' created terminal:' + str(terminal_name))
except Exception as e:
error_logger.info(e)
last_id = Terminal.objects.latest('id')
return HttpResponse(last_id.id)
def terminal_detail(request, pk):
user = get_object_or_404(Terminal, pk=pk)
user_trail(request.user.name, 'accessed terminal: ' + str(user.terminal_name), 'view')
info_logger.info('User: ' + str(request.user.name) + ' accessed terminal:' + str(user.terminal_name))
return TemplateResponse(request, 'dashboard/terminal/detail.html', {'user':user})
def terminal_delete(request, pk):
terminal = get_object_or_404(Terminal, pk=pk)
if request.method == 'POST':
terminal.delete()
user_trail(request.user.name, 'deleted terminal: '+ str(terminal.terminal_name), 'delete')
info_logger.info('User: ' + str(request.user.name) + ' deleted terminal:' + str(terminal.terminal_name))
return HttpResponse('success')
def terminal_edit(request, pk):
terminal = get_object_or_404(Terminal, pk=pk)
ctx = {'user': terminal}
user_trail(request.user.name, 'accessed edit page for user '+ str(terminal.terminal_name),'update')
info_logger.info('User: '+str(request.user.name)+' accessed edit page for user: '+str(terminal.terminal_name))
return TemplateResponse(request, 'dashboard/terminal/terminal_edit.html', ctx)
def terminal_update(request, pk):
terminal = get_object_or_404(Terminal, pk=pk)
if request.method == 'POST':
name = request.POST.get('name')
nid = request.POST.get('nid')
terminal.terminal_name = name
terminal.terminal_number = nid
terminal.save()
user_trail(request.user.name, 'updated terminal: '+ str(terminal.terminal_name))
info_logger.info('User: '+str(request.user.name)+' updated terminal: '+str(terminal.terminal_name))
return HttpResponse("success")
@staff_member_required
def terminal_history(request,pk=None):
if request.method == 'GET':
if pk:
instance = get_object_or_404(Terminal, pk=pk)
terminal_history = TerminalHistoryEntry.objects.filter(terminal=instance).order_by('-id')
ctx = {'terminal_history':terminal_history}
#user_trail(request.user.name, 'accessed terminal history for terminal: ' + str(instance.terminal_name), 'view')
#info_logger.info('User: ' + str(request.user.name) + 'accessed terminal history for terminal for:' + str(user.terminal_name))
return TemplateResponse(request, 'dashboard/includes/_terminal_history.html', ctx)
def user_trails(request):
users = UserTrail.objects.all().order_by('id')
user_trail(request.user.name, 'accessed user trail page')
info_logger.info('User: '+str(request.user.name)+' accessed the user trail page')
return TemplateResponse(request, 'dashboard/users/trail.html', {'users':users})
| 38.032609
| 136
| 0.742403
|
327549749a37a7a84bbd437ac70096d14037e18d
| 299
|
py
|
Python
|
ESP32S/workSpace/WS2812.py
|
hu-tianyi/AuTrix
|
4058cfba6d2d96b6a3f6b6564c0a85c855e57fa9
|
[
"MIT"
] | null | null | null |
ESP32S/workSpace/WS2812.py
|
hu-tianyi/AuTrix
|
4058cfba6d2d96b6a3f6b6564c0a85c855e57fa9
|
[
"MIT"
] | null | null | null |
ESP32S/workSpace/WS2812.py
|
hu-tianyi/AuTrix
|
4058cfba6d2d96b6a3f6b6564c0a85c855e57fa9
|
[
"MIT"
] | null | null | null |
import machine, time, neopixel
print("aaaaaaaaaaaaa")
n = 256
p = 5
np = neopixel.NeoPixel(machine.Pin(p), n)
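# Light all 256 pixels in dim red, hold for half a second, then clear the strip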
for index in range (256):
np[index] = (20,0,0)
np.write()
time.sleep_ms(500)
for index in range (256):
np[index] = (0,0,0)
np.write()
print("Executed WS2812 Command")
| 10.678571
| 41
| 0.638796
|
880055672d39f68bb8acb9f92ed8e5e1a7a151f5
| 1,572
|
py
|
Python
|
123/chessman/Pao.py
|
chenhongyan950312/xiangqidaima
|
159a814f86c10d8fceaead68dca8286ea41727fa
|
[
"MIT"
] | null | null | null |
123/chessman/Pao.py
|
chenhongyan950312/xiangqidaima
|
159a814f86c10d8fceaead68dca8286ea41727fa
|
[
"MIT"
] | null | null | null |
123/chessman/Pao.py
|
chenhongyan950312/xiangqidaima
|
159a814f86c10d8fceaead68dca8286ea41727fa
|
[
"MIT"
] | null | null | null |
from ChessPiece import ChessPiece
class Pao(ChessPiece):
def get_image_file_name(self):
if self.selected:
if self.is_red:
return "/home/chy/cchess-zero-master/images/RCS.GIF"
else:
return "/home/chy/cchess-zero-master/images/BCS.GIF"
else:
if self.is_red:
return "/home/chy/cchess-zero-master/images/RC.GIF"
else:
return "/home/chy/cchess-zero-master/images/BC.GIF"
def get_selected_image(self):
if self.is_red:
return "/home/chy/cchess-zero-master/images/RCS.GIF"
else:
return "/home/chy/cchess-zero-master/images/BCS.GIF"
def can_move(self, board, dx, dy):
if dx != 0 and dy != 0:
#print 'no diag'
return False
nx, ny = self.x + dx, self.y + dy
if nx < 0 or nx > 8 or ny < 0 or ny > 9:
return False
if (nx, ny) in board.pieces:
if board.pieces[nx, ny].is_red == self.is_red:
#print 'blocked by yourself'
return False
cnt = self.count_pieces(board, self.x, self.y, dx, dy)
# print 'Pao cnt',cnt
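# Cannon rule: it slides to an empty square only over a clear path (no pieces in between) and captures only by jumping over exactly one piece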
if (nx, ny) not in board.pieces:
if cnt != 0:
#print 'blocked'
return False
else:
if cnt != 1:
#print 'cannot kill'
return False
return True
def __init__(self, x, y, is_red, direction):
ChessPiece.__init__(self, x, y, is_red, direction)
| 30.823529
| 68
| 0.521628
|
2337239a22ce53b8393e898c9d5c10391830e810
| 10,842
|
py
|
Python
|
CAIL2020/znwd/dumpes.py
|
ShenDezhou/CAIL
|
c4cfa98ab4ecedbce34a7a5a186830486047540c
|
[
"Apache-2.0"
] | 71
|
2020-07-16T01:49:27.000Z
|
2022-03-27T16:55:00.000Z
|
CAIL2020/znwd/dumpes.py
|
ShenDezhou/CAIL
|
c4cfa98ab4ecedbce34a7a5a186830486047540c
|
[
"Apache-2.0"
] | 11
|
2020-09-18T14:26:25.000Z
|
2022-02-09T23:49:33.000Z
|
CAIL2020/znwd/dumpes.py
|
ShenDezhou/CAIL
|
c4cfa98ab4ecedbce34a7a5a186830486047540c
|
[
"Apache-2.0"
] | 16
|
2020-07-15T07:24:30.000Z
|
2022-03-19T05:41:11.000Z
|
import os
import os.path
import time
import json
import codecs
import elasticsearch
import progressbar
from backports import csv
from functools import wraps
from zipfile import ZipFile, ZIP_DEFLATED
FLUSH_BUFFER = 1000 # Chunk of docs to flush in temp file
CONNECTION_TIMEOUT = 120
TIMES_TO_TRY = 3
RETRY_DELAY = 60
META_FIELDS = ['_id']
# Retry decorator for functions with exceptions
def retry(ExceptionToCheck, tries=TIMES_TO_TRY, delay=RETRY_DELAY):
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries = tries
while mtries > 0:
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
print(e)
print(('Retrying in {} seconds ...'.format(delay)))
time.sleep(delay)
mtries -= 1
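# After the retry loop is exhausted, make one final attempt and exit the process if it still fails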
else:
print('Done.')
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
print(('Fatal Error: {}'.format(e)))
exit(1)
return f_retry
return deco_retry
class Es2disk:
def __init__(self, opts):
self.opts = opts
self.num_results = 0
self.scroll_ids = []
self.scroll_time = '30m'
self.csv_headers = list(META_FIELDS) if self.opts.meta_fields else []
self.tmp_file = '{}.tmp'.format(opts.output_file)
@retry(elasticsearch.exceptions.ConnectionError, tries=TIMES_TO_TRY)
def create_connection(self):
es = elasticsearch.Elasticsearch(self.opts.url, timeout=CONNECTION_TIMEOUT, http_auth=self.opts.auth)
es.cluster.health()
self.es_conn = es
@retry(elasticsearch.exceptions.ConnectionError, tries=TIMES_TO_TRY)
def check_indexes(self):
indexes = self.opts.index_prefixes
if '_all' in indexes:
indexes = ['_all']
else:
indexes = [index for index in indexes if self.es_conn.indices.exists(index)]
if not indexes:
print(('Any of index(es) {} does not exist in {}.'.format(', '.join(self.opts.index_prefixes),
self.opts.url)))
exit(1)
self.opts.index_prefixes = indexes
@retry(elasticsearch.exceptions.ConnectionError, tries=TIMES_TO_TRY)
def search_query(self):
@retry(elasticsearch.exceptions.ConnectionError, tries=TIMES_TO_TRY)
def next_scroll(scroll_id):
return self.es_conn.scroll(scroll=self.scroll_time, scroll_id=scroll_id)
search_args = dict(
index=','.join(self.opts.index_prefixes),
sort=','.join(self.opts.sort),
scroll=self.scroll_time,
size=self.opts.scroll_size,
terminate_after=self.opts.max_results
)
if self.opts.doc_types:
search_args['doc_type'] = self.opts.doc_types
if self.opts.query.startswith('@'):
query_file = self.opts.query[1:]
if os.path.exists(query_file):
with codecs.open(query_file, mode='r', encoding='utf-8') as f:
self.opts.query = f.read()
else:
print(('No such file: {}.'.format(query_file)))
exit(1)
if self.opts.raw_query:
try:
query = json.loads(self.opts.query)
except ValueError as e:
print(('Invalid JSON syntax in query. {}'.format(e)))
exit(1)
search_args['body'] = query
else:
query = self.opts.query if not self.opts.tags else '{} AND tags: ({})'.format(
self.opts.query, ' AND '.join(self.opts.tags))
search_args['q'] = query
if '_all' not in self.opts.fields:
search_args['_source_include'] = ','.join(self.opts.fields)
self.csv_headers.extend([field for field in self.opts.fields if '*' not in field])
if self.opts.debug_mode:
print(('Using these indices: {}.'.format(', '.join(self.opts.index_prefixes))))
print(('Query[{0[0]}]: {0[1]}.'.format(
('Query DSL', json.dumps(query, ensure_ascii=False).encode('utf8')) if self.opts.raw_query else (
'Lucene', query))
))
print(('Output field(s): {}.'.format(', '.join(self.opts.fields))))
print(('Sorting by: {}.'.format(', '.join(self.opts.sort))))
res = self.es_conn.search(**search_args)
self.num_results = res['hits']['total']
print(('Found {} results.'.format(self.num_results)))
if self.opts.debug_mode:
print((json.dumps(res, ensure_ascii=False).encode('utf8')))
if self.num_results > 0:
codecs.open(self.opts.output_file, mode='w', encoding='utf-8').close()
codecs.open(self.tmp_file, mode='w', encoding='utf-8').close()
hit_list = []
total_lines = 0
widgets = ['Run query ',
progressbar.Bar(left='[', marker='#', right=']'),
progressbar.FormatLabel(' [%(value)i/%(max)i] ['),
progressbar.Percentage(),
progressbar.FormatLabel('] [%(elapsed)s] ['),
progressbar.ETA(), '] [',
progressbar.FileTransferSpeed(unit='docs'), ']'
]
bar = progressbar.ProgressBar(widgets=widgets, maxval=self.num_results).start()
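# Page through the hits with the scroll API, flushing FLUSH_BUFFER docs at a time to the temp file until all results (or max_results) are read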
while total_lines != self.num_results:
if res['_scroll_id'] not in self.scroll_ids:
self.scroll_ids.append(res['_scroll_id'])
if not res['hits']['hits']:
print(('Scroll[{}] expired(multiple reads?). Saving loaded data.'.format(res['_scroll_id'])))
break
for hit in res['hits']['hits']:
total_lines += 1
bar.update(total_lines)
hit_list.append(hit)
if len(hit_list) == FLUSH_BUFFER:
self.flush_to_file(hit_list)
hit_list = []
if self.opts.max_results:
if total_lines == self.opts.max_results:
self.flush_to_file(hit_list)
print(('Hit max result limit: {} records'.format(self.opts.max_results)))
return
res = next_scroll(res['_scroll_id'])
self.flush_to_file(hit_list)
bar.finish()
def flush_to_file(self, hit_list):
def to_keyvalue_pairs(source, ancestors=[], header_delimeter='.'):
def is_list(arg):
return type(arg) is list
def is_dict(arg):
return type(arg) is dict
if is_dict(source):
for key in list(source.keys()):
to_keyvalue_pairs(source[key], ancestors + [key])
elif is_list(source):
if self.opts.kibana_nested:
[to_keyvalue_pairs(item, ancestors) for item in source]
else:
[to_keyvalue_pairs(item, ancestors + [str(index)]) for index, item in enumerate(source)]
else:
header = header_delimeter.join(ancestors)
if header not in self.csv_headers:
self.csv_headers.append(header)
try:
out[header] = '{}{}{}'.format(out[header], self.opts.delimiter, source)
except:
out[header] = source
with codecs.open(self.tmp_file, mode='a', encoding='utf-8') as tmp_file:
for hit in hit_list:
out = {field: hit[field] for field in META_FIELDS} if self.opts.meta_fields else {}
if '_source' in hit and len(hit['_source']) > 0:
to_keyvalue_pairs(hit['_source'])
tmp_file.write('{}\n'.format(json.dumps(out)))
tmp_file.close()
def write_to_csv(self):
if self.num_results > 0:
self.num_results = sum(1 for line in codecs.open(self.tmp_file, mode='r', encoding='utf-8'))
if self.num_results > 0:
output_file = codecs.open(self.opts.output_file, mode='a', encoding='utf-8')
csv_writer = csv.DictWriter(output_file, fieldnames=self.csv_headers)
csv_writer.writeheader()
timer = 0
widgets = ['Write to csv ',
progressbar.Bar(left='[', marker='#', right=']'),
progressbar.FormatLabel(' [%(value)i/%(max)i] ['),
progressbar.Percentage(),
progressbar.FormatLabel('] [%(elapsed)s] ['),
progressbar.ETA(), '] [',
progressbar.FileTransferSpeed(unit='lines'), ']'
]
bar = progressbar.ProgressBar(widgets=widgets, maxval=self.num_results).start()
for line in codecs.open(self.tmp_file, mode='r', encoding='utf-8'):
timer += 1
bar.update(timer)
csv_writer.writerow(json.loads(line))
output_file.close()
bar.finish()
if self.opts.zip:
with ZipFile(self.opts.output_file + ".zip", mode='a', compression=ZIP_DEFLATED) as zip_file:
zip_file.write(self.opts.output_file)
os.remove(self.opts.output_file)
else:
print(('There is no docs with selected field(s): {}.'.format(','.join(self.opts.fields))))
os.remove(self.tmp_file)
def clean_scroll_ids(self):
try:
self.es_conn.clear_scroll(body=','.join(self.scroll_ids))
except:
pass
class Opts(object):
def __init__(self, d):
self.__dict__ = d
if __name__ == "__main__":
opts = {}
opts["url"] = "http://192.168.10.23:9400"
opts["index_prefixes"] = ["znwd_faxiaobao_3"]
opts["doc_types"] = "znwd_test3_2"
opts["fields"] = ["title", "content", "error_content"]
opts["query"] = "*"
opts["output_file"] = "data_train.csv"
opts["scroll_size"] = 10000
opts["meta_fields"] = False
opts["auth"] = False
opts["sort"] = []
opts["max_results"] = 7137760
opts["raw_query"] = False
opts["tags"] = []
opts["debug_mode"] = False
opts["zip"] = True
es = Es2disk(Opts(opts))
es.create_connection()
es.check_indexes()
es.search_query()
es.write_to_csv()
es.clean_scroll_ids()
| 38.176056
| 113
| 0.533758
|
49b671f59e9c39669b90a36f09980c98c15bcd3b
| 442
|
py
|
Python
|
docker-deploy/web-app/Ride_Share/migrations/0025_auto_20200203_1931.py
|
universebh/ride_share
|
f5426b13ed256c6d6a0e966e2306da0f5fce8235
|
[
"MIT"
] | null | null | null |
docker-deploy/web-app/Ride_Share/migrations/0025_auto_20200203_1931.py
|
universebh/ride_share
|
f5426b13ed256c6d6a0e966e2306da0f5fce8235
|
[
"MIT"
] | null | null | null |
docker-deploy/web-app/Ride_Share/migrations/0025_auto_20200203_1931.py
|
universebh/ride_share
|
f5426b13ed256c6d6a0e966e2306da0f5fce8235
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.9 on 2020-02-04 00:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Ride_Share', '0024_auto_20200203_1929'),
]
operations = [
migrations.AlterField(
model_name='myuser',
name='email',
field=models.EmailField(help_text='Required', max_length=254, verbose_name='Email address'),
),
]
| 23.263158
| 104
| 0.624434
|
e7a5b22d59e2cfa4f723314eefc955e28c4a849a
| 630
|
py
|
Python
|
api/schemas/schemas.py
|
JackyCJ/flask-restful-login
|
1bd31ee3da8ca667dc7592123ad5ee700dcb2595
|
[
"MIT"
] | 76
|
2017-07-15T15:52:51.000Z
|
2022-03-30T04:45:05.000Z
|
api/schemas/schemas.py
|
JackyCJ/flask-restful-login
|
1bd31ee3da8ca667dc7592123ad5ee700dcb2595
|
[
"MIT"
] | 83
|
2019-08-10T06:58:11.000Z
|
2022-03-31T16:29:37.000Z
|
api/schemas/schemas.py
|
JackyCJ/flask-restful-login
|
1bd31ee3da8ca667dc7592123ad5ee700dcb2595
|
[
"MIT"
] | 23
|
2016-07-14T09:06:51.000Z
|
2022-03-28T07:54:39.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from marshmallow import Schema, fields
class BaseUserSchema(Schema):
"""
Base user schema that returns all fields; it is not used by the user handlers.
"""
# Schema parameters.
id = fields.Int(dump_only=True)
username = fields.Str()
email = fields.Str()
password = fields.Str()
created = fields.Str()
class UserSchema(Schema):
"""
User schema that returns only the username, email, and creation time; this is the schema used by the user handlers.
"""
# Schema parameters.
username = fields.Str()
email = fields.Str()
created = fields.Str()
| 19.090909
| 95
| 0.639683
|
e2fdedd0e2c60b0ecc6c71da7e65a08d5de439b2
| 10,821
|
py
|
Python
|
dqn/dueling_network_old.py
|
20chase/cartpole_rl
|
687fc30f7e69f4850c545dce74f4e844d75fd732
|
[
"MIT"
] | 3
|
2019-07-18T09:09:47.000Z
|
2021-11-17T11:05:12.000Z
|
dqn/dueling_network_old.py
|
20chase/cartpole_rl
|
687fc30f7e69f4850c545dce74f4e844d75fd732
|
[
"MIT"
] | null | null | null |
dqn/dueling_network_old.py
|
20chase/cartpole_rl
|
687fc30f7e69f4850c545dce74f4e844d75fd732
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import tensorlayer as tl
import gym
import numpy as np
import random
import os
from gym import wrappers
from collections import deque
# Hyper Parameters for DQN
GAMMA = 0.99 # discount factor for target Q
INITIAL_EPSILON = 1 # starting value of epsilon
FINAL_EPSILON = 0.01 # final value of epsilon
EXPLOER_NUM = 10000
REPLAY_SIZE = 20000 # experience replay buffer size
BATCH_SIZE = 64 # size of minibatch
LEARNING_RATE = 1e-4
DECLAY_FLAG = True
DECLAY_NUM = 1e-5
DISPLAY = False
SAVE = False
LOAD = False
MODE_NAME = 'LunarLander-v2'
# MODE_NAME = 'Atlantis-ram-v0'
EPISODE = 10000 # Episode limitation
STEP = 10000 # Step limitation in an episode
TEST = 100
UPDATE_TIME = 500
OBSERVE_NUM = 32
TARGET_NUM = 195
EVAL_FLAG = False
class DQN():
# DQN Agent
def __init__(self, env):
# init experience replay
self.replay_buffer = deque()
# init some parameters
self.time_step = 0
self.reward = 0
self.epsilon = INITIAL_EPSILON
self.state_dim = env.observation_space.shape[0]
self.action_dim = env.action_space.n
print 'state_dim:', self.state_dim, ' action_dim:', self.action_dim
self.create_Q_network()
self.create_Q_network_target()
self.create_training_method()
# Init session
self.session = tf.InteractiveSession()
self.merged = tf.summary.merge_all()
self.train_writer = tf.summary.FileWriter('/tmp/train', self.session.graph)
self.session.run(tf.global_variables_initializer())
def create_Q_network(self):
# input layer
self.state_input = tf.placeholder("float",[None,self.state_dim])
self.network = tl.layers.InputLayer(self.state_input, name='Input')
self.network = tl.layers.DenseLayer(self.network, n_units=200, act=tf.nn.relu, name='relu1')
self.network = tl.layers.DenseLayer(self.network, n_units=200, act=tf.nn.relu, name='relu2')
self.network = tl.layers.DenseLayer(self.network, n_units=200, act=tf.nn.relu, name='relu3')
self.network = tl.layers.DenseLayer(self.network, n_units=200, act=tf.nn.relu, name='relu4')
self.network_V = tl.layers.DenseLayer(self.network, n_units=200, act=tf.nn.relu, name='V_1')
self.network_V = tl.layers.DenseLayer(self.network_V, n_units=200, act=tf.nn.relu, name='V_2')
self.network_V = tl.layers.DenseLayer(self.network_V, n_units=1, name='output_V')
self.network_A = tl.layers.DenseLayer(self.network, n_units=200, act=tf.nn.relu, name='A_1')
self.network_A = tl.layers.DenseLayer(self.network_A, n_units=200, act=tf.nn.relu, name='A_2')
self.network_A = tl.layers.DenseLayer(self.network_A, n_units=self.action_dim, name='output_A')
self.value_function = self.network_V.outputs
self.advantage_function = self.network_A.outputs
def create_Q_network_target(self):
# input layer
self.state_input_target = tf.placeholder("float",[None,self.state_dim])
self.network_target = tl.layers.InputLayer(self.state_input_target, name='Input_target')
self.network_target = tl.layers.DenseLayer(self.network_target, n_units=200, act=tf.nn.relu, name='relu_target_1')
self.network_target = tl.layers.DenseLayer(self.network_target, n_units=200, act=tf.nn.relu, name='relu_target_2')
self.network_target = tl.layers.DenseLayer(self.network_target, n_units=200, act=tf.nn.relu, name='relu_target_3')
self.network_target = tl.layers.DenseLayer(self.network_target, n_units=200, act=tf.nn.relu, name='relu_target_4')
self.network_V_target = tl.layers.DenseLayer(self.network_target, n_units=200, act=tf.nn.relu, name='V_1_target')
self.network_V_target = tl.layers.DenseLayer(self.network_V_target, n_units=200, act=tf.nn.relu, name='V_2_target')
self.network_V_target = tl.layers.DenseLayer(self.network_V_target, n_units=1, name='output_V_target')
self.network_A_target = tl.layers.DenseLayer(self.network_target, n_units=200, act=tf.nn.relu, name='A_1_target')
self.network_A_target = tl.layers.DenseLayer(self.network_A_target, n_units=200, act=tf.nn.relu, name='A_2_target')
self.network_A_target = tl.layers.DenseLayer(self.network_A_target, n_units=self.action_dim, name='output_A_target')
self.value_function_target = self.network_V_target.outputs
self.advantage_function_target = self.network_A_target.outputs
def create_training_method(self):
self.action_input = tf.placeholder("float",[None,self.action_dim]) # one hot presentation
self.y_input = tf.placeholder("float",[None])
self.reward_sum = tf.placeholder("float")
self.epsilon_sum = tf.placeholder("float")
self.replay_size = tf.placeholder("float")
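# Dueling aggregation: Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)), evaluated below for the action that was taken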
A_origin = tf.reduce_sum(tf.multiply(self.advantage_function, self.action_input),reduction_indices = 1)
A_baseline = tf.reduce_mean(self.advantage_function, reduction_indices = 1)
Q_action = self.value_function + (A_origin - A_baseline)
self.cost = tf.reduce_mean(tf.square(self.y_input - Q_action))
A_value = tf.reduce_mean(self.advantage_function, axis=0)
A_baseline_value = tf.reduce_mean(A_baseline)
V_value = tf.reduce_mean(self.value_function)
with tf.name_scope('loss'):
tf.summary.scalar('cost', self.cost)
with tf.name_scope('reward'):
tf.summary.scalar('reward_mean', self.reward_sum)
with tf.name_scope('Q_value_nomalize'):
tf.summary.scalar('Q_value', V_value + (A_value[0] - A_baseline_value))
tf.summary.scalar('Q_value', V_value + (A_value[1] - A_baseline_value))
# tf.summary.scalar('Q_value', V_value + (A_value[2] - A_baseline_value))
with tf.name_scope('param'):
tf.summary.scalar('epsilon', self.epsilon_sum)
tf.summary.scalar('replay_size', self.replay_size)
self.optimizer_1 = tf.train.AdamOptimizer(LEARNING_RATE).minimize(self.cost)
# self.optimizer_2 = tf.train.RMSPropOptimizer(0.00025,0.99,0.0,1e-7).minimize(self.cost)
# self.optimizer_3 = tf.train.RMSPropOptimizer(0.00025,0.99,0.0,1e-8).minimize(self.cost)
def perceive(self,state,action,reward,next_state,done):
one_hot_action = np.zeros(self.action_dim)
one_hot_action[action] = 1
self.replay_buffer.append((state,one_hot_action,reward,next_state,done))
if len(self.replay_buffer) > REPLAY_SIZE:
self.replay_buffer.popleft()
if len(self.replay_buffer) > BATCH_SIZE:
self.train_Q_network()
def write_reward(self, reward_sum):
self.reward = reward_sum
def train_Q_network(self):
self.time_step += 1
# Step 1: obtain random minibatch from replay memory
minibatch = random.sample(self.replay_buffer,BATCH_SIZE)
state_batch = [data[0] for data in minibatch]
action_batch = [data[1] for data in minibatch]
reward_batch = [data[2] for data in minibatch]
next_state_batch = [data[3] for data in minibatch]
# Step 2: calculate y
y_batch = []
value_target_batch = self.value_function_target.eval(feed_dict = {self.state_input_target:next_state_batch})
advantage_target_batch = self.advantage_function_target.eval(feed_dict = {self.state_input_target:next_state_batch})
advantage_baseline_batch = np.mean(advantage_target_batch, axis = 1)
advantage_baseline_batch = advantage_baseline_batch.reshape(BATCH_SIZE, 1)
advantage_batch = self.advantage_function.eval(feed_dict = {self.state_input:state_batch})
# print '1:', np.shape(value_target_batch)
# print '2:', np.shape(advantage_target_batch)
# print '3:', np.shape(advantage_baseline_batch)
# print '4:', np.shape(advantage_batch)
# print '1-1:', value_target_batch[0][0]
# print '3-1:', advantage_baseline_batch[0]
# print '4-1:', np.argmax(advantage_batch[0])
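# Target: r + GAMMA * (V_target + (A_target[argmax A_online] - mean A_target)) -- the online advantage head picks the action, the target network evaluates it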
for i in range(0, BATCH_SIZE):
done = minibatch[i][4]
if done:
y_batch.append(reward_batch[i])
else :
y_batch.append(reward_batch[i] + GAMMA * (value_target_batch[i][0] + (advantage_target_batch[i][np.argmax(advantage_batch[i])] - advantage_baseline_batch[i][0])))
replay_size = len(self.replay_buffer)
summary, _ = self.session.run([self.merged, self.optimizer_1], feed_dict={
self.y_input:y_batch,
self.action_input:action_batch,
self.state_input:state_batch,
self.reward_sum:self.reward,
self.epsilon_sum:self.epsilon,
self.replay_size:replay_size})
self.train_writer.add_summary(summary, self.time_step)
if self.time_step % UPDATE_TIME == 0:
# print 'updating...'
tl.files.assign_params(self.session, self.network.all_params, self.network_target)
tl.files.assign_params(self.session, self.network_A.all_params, self.network_A_target)
tl.files.assign_params(self.session, self.network_V.all_params, self.network_V_target)
def egreedy_action(self,state):
if self.time_step < OBSERVE_NUM:
return random.randint(0,self.action_dim - 1)
if DECLAY_FLAG:
self.epsilon *= (1 - DECLAY_NUM)
else:
self.epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLOER_NUM
if self.epsilon < FINAL_EPSILON:
self.epsilon = FINAL_EPSILON
Q_value = self.advantage_function.eval(feed_dict = {self.state_input:[state]})[0]
if random.random() <= self.epsilon:
return random.randint(0,self.action_dim - 1)
else:
return np.argmax(Q_value)
def action(self,state):
return np.argmax(self.advantage_function.eval(feed_dict = {  # self.Q_value is never defined; the advantage head gives the greedy action
self.state_input:[state]
})[0])
def train_game():
env = gym.make(MODE_NAME)
if EVAL_FLAG:
env = wrappers.Monitor(env, '/tmp/' + MODE_NAME)
agent = DQN(env)
if LOAD is True:
params = tl.files.load_npz(name=MODE_NAME + '.npz')
tl.files.assign_params(agent.session, params, agent.network)
reward_mean = 0
reward_sum = 0
end_flag = False
for episode in xrange(EPISODE):
# initialize task
state = env.reset()
if end_flag:
break;
# Train
for step in xrange(STEP):
if DISPLAY is True:
env.render()
action = agent.egreedy_action(state) # e-greedy action for train
next_state,reward,done,_ = env.step(action)
reward_sum += reward
agent.perceive(state,action,reward,next_state,done)
state = next_state
if done:
agent.write_reward(reward_sum)
reward_mean += reward_sum
print 'epsido: ', episode, '... reward_sum: ', reward_sum
reward_sum = 0
if episode % TEST == 0:
if SAVE is True:
tl.files.save_npz(agent.network.all_params, name=MODE_NAME + '.npz')
reward_mean /= (TEST + 1)
if (reward_mean > TARGET_NUM):
end_flag = True
print 'episode:', episode, ' reward_mean:', reward_mean, ' epsilon: ', agent.epsilon
break
if __name__ == '__main__':
train_game()
if EVAL_FLAG:
gym.upload('/tmp/' + MODE_NAME, api_key='sk_nXYWtyR0CfjmTgSiJVJA')
| 37.442907
| 170
| 0.712503
|
69ad7163f8d258608d0f58bb9ccc2e396ca4ee6f
| 2,325
|
py
|
Python
|
setup.py
|
Wi11iamDing/toad
|
3b22cc9a5d83255d394da483ec47b0de5f862c07
|
[
"MIT"
] | 1
|
2021-04-29T08:59:26.000Z
|
2021-04-29T08:59:26.000Z
|
setup.py
|
lijihong111/toad
|
3b22cc9a5d83255d394da483ec47b0de5f862c07
|
[
"MIT"
] | null | null | null |
setup.py
|
lijihong111/toad
|
3b22cc9a5d83255d394da483ec47b0de5f862c07
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
from setuptools import setup, find_packages, Extension
NAME = 'toad'
CURRENT_PATH = os.path.abspath(os.path.dirname(__file__))
VERSION_FILE = os.path.join(CURRENT_PATH, NAME, 'version.py')
def get_version():
ns = {}
with open(VERSION_FILE) as f:
exec(f.read(), ns)
return ns['__version__']
def get_ext_modules():
from Cython.Build import cythonize
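# Compile the c_utils and merge Cython extensions against the NumPy headers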
extensions = [
Extension('toad.c_utils', sources = ['toad/c_utils.pyx'], include_dirs = [np.get_include()]),
Extension('toad.merge', sources = ['toad/merge.pyx'], include_dirs = [np.get_include()]),
]
return cythonize(extensions)
def get_requirements(stage = None):
file_name = 'requirements'
if stage is not None:
file_name = f"{file_name}-{stage}"
requirements = []
with open(f"{file_name}.txt", 'r') as f:
for line in f:
line = line.strip()
if not line or line.startswith('-'):
continue
requirements.append(line)
return requirements
setup(
name = NAME,
version = get_version(),
description = 'Toad is dedicated to facilitating model development process, especially for a scorecard.',
long_description = open('README.md', encoding = 'utf-8').read(),
long_description_content_type = 'text/markdown',
url = 'https://github.com/amphibian-dev/toad',
author = 'ESC Team',
author_email = 'secbone@gmail.com',
packages = find_packages(exclude = ['tests']),
include_dirs = [np.get_include()],
ext_modules = get_ext_modules(),
include_package_data = True,
python_requires = '>=3.6',
install_requires = get_requirements(),
extras_require = {
'nn': get_requirements('nn')
},
tests_require = get_requirements('test'),
license = 'MIT',
classifiers = [
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
entry_points = {
'console_scripts': [
'toad = toad.cli:main',
],
},
)
| 28.012048
| 109
| 0.610753
|
f161be15510630801958c75c71e0feeccffe4eeb
| 2,551
|
py
|
Python
|
train.py
|
x052/proxy-detector
|
bcd1fca7c11d784364197ff240dbfbe4bca9f895
|
[
"MIT"
] | null | null | null |
train.py
|
x052/proxy-detector
|
bcd1fca7c11d784364197ff240dbfbe4bca9f895
|
[
"MIT"
] | 1
|
2018-04-16T20:07:35.000Z
|
2018-04-17T17:27:55.000Z
|
train.py
|
x052/proxy-detector
|
bcd1fca7c11d784364197ff240dbfbe4bca9f895
|
[
"MIT"
] | 1
|
2019-11-18T15:30:35.000Z
|
2019-11-18T15:30:35.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tempfile
import itertools
import pandas as pd
from six.moves import urllib
import tensorflow as tf
CSV_COLUMNS = [
"isProxy",
"org",
"continentCode",
"countryCode"
]
org = tf.feature_column.categorical_column_with_hash_bucket(
"org", hash_bucket_size=10000, dtype=tf.string)
continentCode = tf.feature_column.categorical_column_with_hash_bucket(
"continentCode", hash_bucket_size=200, dtype=tf.string)
countryCode = tf.feature_column.categorical_column_with_hash_bucket(
"countryCode", hash_bucket_size=400, dtype=tf.string)
# Feature columns for the linear model.
base_columns = [
org, continentCode, countryCode
]
def build_estimator(model_dir):
"""Build an estimator."""
m = tf.estimator.LinearClassifier(
model_dir=model_dir, feature_columns=base_columns,
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=1.0,
l2_regularization_strength=1.0)
)
return m
def input_fn(data_file, num_epochs, num_threads, shuffle):
"""Input builder function."""
df_data = pd.read_csv(
tf.gfile.Open(data_file),
names=CSV_COLUMNS,
skipinitialspace=True,
engine="python",
skiprows=1)
# remove NaN elements
df_data = df_data.dropna(how="any", axis=0)
labels = df_data["isProxy"].apply(lambda x: x).astype(int)
return tf.estimator.inputs.pandas_input_fn(
x=df_data,
y=labels,
batch_size=100,
num_epochs=num_epochs,
shuffle=shuffle,
num_threads=num_threads)
def train_and_eval():
"""Train and evaluate the model."""
m = build_estimator(model_dir)
# set num_epochs to None to get infinite stream of data.
m.train(
input_fn=input_fn(train_file_name, num_epochs=1, num_threads=1, shuffle=True),
steps=2000)
# set steps to None to run evaluation until all data consumed.
results = m.evaluate(
input_fn=input_fn(test_file_name, num_epochs=1, num_threads=1, shuffle=True),
steps=None)
print("model directory = %s" % model_dir)
for key in sorted(results):
print("%s: %s" % (key, results[key]))
predictions = m.predict(input_fn=input_fn(test_file_name, num_epochs=1, num_threads=1, shuffle=False))
for p in predictions:
pass
train_file_name = "./data/testingIpData.csv"
test_file_name = "./data/trainingIpData.csv"
model_dir = "./model"
train_and_eval()
| 28.032967
| 106
| 0.713838
|
70d1f37da66e7f76b41422793c0a4900d1874961
| 94
|
py
|
Python
|
p.py
|
usha324/python
|
7aa967b8dac8cd0c466652db448cb7e405821389
|
[
"bzip2-1.0.6"
] | null | null | null |
p.py
|
usha324/python
|
7aa967b8dac8cd0c466652db448cb7e405821389
|
[
"bzip2-1.0.6"
] | null | null | null |
p.py
|
usha324/python
|
7aa967b8dac8cd0c466652db448cb7e405821389
|
[
"bzip2-1.0.6"
] | null | null | null |
str = "this is string example....programmer!!!";
print "Length of the string: ", len(str)
| 23.5
| 49
| 0.638298
|
b3480a956cd92e53b52bc9c62f69424a36d09e42
| 7,012
|
py
|
Python
|
uhd_restpy/testplatform/sessions/ixnetwork/topology/spbsimedgeisidlist_cfeb124762b8e4653da4ea2e084e78c8.py
|
rfrye-github/ixnetwork_restpy
|
23eeb24b21568a23d3f31bbd72814ff55eb1af44
|
[
"MIT"
] | null | null | null |
uhd_restpy/testplatform/sessions/ixnetwork/topology/spbsimedgeisidlist_cfeb124762b8e4653da4ea2e084e78c8.py
|
rfrye-github/ixnetwork_restpy
|
23eeb24b21568a23d3f31bbd72814ff55eb1af44
|
[
"MIT"
] | null | null | null |
uhd_restpy/testplatform/sessions/ixnetwork/topology/spbsimedgeisidlist_cfeb124762b8e4653da4ea2e084e78c8.py
|
rfrye-github/ixnetwork_restpy
|
23eeb24b21568a23d3f31bbd72814ff55eb1af44
|
[
"MIT"
] | null | null | null |
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class SpbSimEdgeIsidList(Base):
"""ISIS SPB Simulated Edge ISID Configuration
The SpbSimEdgeIsidList class encapsulates a required spbSimEdgeIsidList resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'spbSimEdgeIsidList'
_SDM_ATT_MAP = {
'Active': 'active',
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'Isid': 'isid',
'ItagEthernetType': 'itagEthernetType',
'Name': 'name',
'Rbit': 'rbit',
'Tbit': 'tbit',
'TransmissionType': 'transmissionType',
}
def __init__(self, parent):
super(SpbSimEdgeIsidList, self).__init__(parent)
@property
def Connector(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.connector_d0d942810e4010add7642d3914a1f29b.Connector): An instance of the Connector class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.connector_d0d942810e4010add7642d3914a1f29b import Connector
return Connector(self)
@property
def Active(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Activate/Deactivate Configuration
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Active']))
@property
def Count(self):
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def Isid(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): I-SID
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Isid']))
@property
def ItagEthernetType(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): I-Tag Ethernet Type
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ItagEthernetType']))
@property
def Name(self):
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def Rbit(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): R Bit
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Rbit']))
@property
def Tbit(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): T Bit
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tbit']))
@property
def TransmissionType(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Transmission Type
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TransmissionType']))
def update(self, Name=None):
"""Updates spbSimEdgeIsidList resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def get_device_ids(self, PortNames=None, Active=None, Isid=None, ItagEthernetType=None, Rbit=None, Tbit=None, TransmissionType=None):
"""Base class infrastructure that gets a list of spbSimEdgeIsidList device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- Active (str): optional regex of active
- Isid (str): optional regex of isid
- ItagEthernetType (str): optional regex of itagEthernetType
- Rbit (str): optional regex of rbit
- Tbit (str): optional regex of tbit
- TransmissionType (str): optional regex of transmissionType
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
| 36.520833
| 162
| 0.637336
|
0028cfd79b75082400e237f04c54f4139d91ab07
| 1,789
|
py
|
Python
|
integrations-and-supported-tools/kedro/scripts/kedro_neptune_quickstart/src/kedro_neptune_quickstart/__main__.py
|
neptune-ai/examples
|
e64cfaadb028e2187063fc43768dfee44074729b
|
[
"MIT"
] | 15
|
2021-06-11T16:35:15.000Z
|
2022-03-29T15:53:59.000Z
|
integrations-and-supported-tools/kedro/scripts/kedro_neptune_quickstart/src/kedro_neptune_quickstart/__main__.py
|
neptune-ai/examples
|
e64cfaadb028e2187063fc43768dfee44074729b
|
[
"MIT"
] | 12
|
2021-04-26T13:07:50.000Z
|
2021-11-15T10:50:03.000Z
|
integrations-and-supported-tools/kedro/scripts/kedro_neptune_quickstart/src/kedro_neptune_quickstart/__main__.py
|
neptune-ai/examples
|
e64cfaadb028e2187063fc43768dfee44074729b
|
[
"MIT"
] | 10
|
2021-05-07T16:28:18.000Z
|
2022-02-28T21:47:11.000Z
|
# Copyright 2021 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""kedro_neptune_quickstart file for ensuring the package is executable
as `kedro_neptune_quickstart` and `python -m kedro_neptune_quickstart`
"""
from pathlib import Path
from kedro.framework.project import configure_project
from .cli import run
def main():
configure_project(Path(__file__).parent.name)
run()
if __name__ == "__main__":
main()
| 39.755556
| 77
| 0.775852
|
708305e655c9814e0319c7708eaf79d265c68a16
| 474
|
py
|
Python
|
Task/Handle-a-signal/Python/handle-a-signal-4.py
|
LaudateCorpus1/RosettaCodeData
|
9ad63ea473a958506c041077f1d810c0c7c8c18d
|
[
"Info-ZIP"
] | 1
|
2018-11-09T22:08:38.000Z
|
2018-11-09T22:08:38.000Z
|
Task/Handle-a-signal/Python/handle-a-signal-4.py
|
seanwallawalla-forks/RosettaCodeData
|
9ad63ea473a958506c041077f1d810c0c7c8c18d
|
[
"Info-ZIP"
] | null | null | null |
Task/Handle-a-signal/Python/handle-a-signal-4.py
|
seanwallawalla-forks/RosettaCodeData
|
9ad63ea473a958506c041077f1d810c0c7c8c18d
|
[
"Info-ZIP"
] | 1
|
2018-11-09T22:08:40.000Z
|
2018-11-09T22:08:40.000Z
|
import time, signal
class WeAreDoneException(Exception):
pass
def sigIntHandler(signum, frame):
signal.signal(signal.SIGINT, signal.SIG_DFL) # resets to default handler
raise WeAreDoneException
t1 = time.time()
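# Count up every half second until Ctrl-C makes the handler raise WeAreDoneException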
try:
signal.signal(signal.SIGINT, sigIntHandler)
n = 0
while True:
time.sleep(0.5)
n += 1
print n
except WeAreDoneException:
pass
tdelt = time.time() - t1
print 'Program has run for %5.3f seconds.' % tdelt
| 19.75
| 76
| 0.675105
|
4655d4443673503005f8afcf41532854bee0b229
| 2,533
|
py
|
Python
|
tests/test_response.py
|
XiaoMutt/palpable
|
4c7f6352b1731c86e93232e2cc5fca63f440be19
|
[
"MIT"
] | null | null | null |
tests/test_response.py
|
XiaoMutt/palpable
|
4c7f6352b1731c86e93232e2cc5fca63f440be19
|
[
"MIT"
] | null | null | null |
tests/test_response.py
|
XiaoMutt/palpable
|
4c7f6352b1731c86e93232e2cc5fca63f440be19
|
[
"MIT"
] | null | null | null |
from math import sqrt
from time import sleep
from src.palpable.procedures.map_function import MapFunction
from src.palpable.procedures.procedure import Procedure
from src.palpable.procedures.run_function import RunFunction
from src.palpable.units.task import Task
from src.palpable.units.task_response import TaskResponse
from tests.basis import BaseTest
TEST_DATA = tuple([x * x for x in range(10)])
class SqrtNegative(Procedure):
def __init__(self, nums):
self.nums = nums
def run(self, messenger):
messenger.submit_tasks([Task(RunFunction(sqrt, -num)) for num in self.nums], need_followup=True)
return True
class TestResponse(BaseTest.Case):
num_of_workers = 1 # use only one worker
def test_tbd(self):
response = self.client.ajax_run_procedure(MapFunction(sqrt, TEST_DATA), 0)
self.assertEqual(TaskResponse.TBD.__name__, response["status"])
def test_success(self):
response = self.client.ajax_run_procedure(MapFunction(sqrt, TEST_DATA), -1)
self.assertEqual(TaskResponse.SUCCESS.__name__, response["status"])
self.assertEqual(tuple(map(sqrt, TEST_DATA)), tuple(response["data"]))
def test_error(self):
response = self.client.ajax_run_procedure(MapFunction(sqrt, [-1]), -1)
self.assertEqual(TaskResponse.ERROR.__name__, response["status"])
def test_none(self):
response = self.client.ajax_query_result("abc")
self.assertEqual(TaskResponse.NONE.__name__, response["status"])
def test_followup_ids(self):
response = self.client.ajax_run_procedure(SqrtNegative([-x for x in TEST_DATA]), -1)
self.assertEqual(TaskResponse.SUCCESS.__name__, response["status"])
self.assertTrue(response["status"])
self.assertEqual(len(TEST_DATA), len(response["followup_task_ids"]))
followup_task_ids = set(response["followup_task_ids"])
results = []
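# Poll each follow-up task until it reports SUCCESS, collecting the sqrt results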
while len(followup_task_ids) > 0:
sleep(0.1)
done = set()
for task_id in followup_task_ids:
r = self.client.ajax_query_result(task_id)
self.assertTrue(r["status"] != TaskResponse.NONE.__name__)
self.assertTrue(r["status"] != TaskResponse.ERROR.__name__)
if r["status"] == TaskResponse.SUCCESS.__name__:
results.append(r["data"])
done.add(task_id)
followup_task_ids -= done
self.assertEqual(tuple(map(sqrt, TEST_DATA)), tuple(sorted(results)))
| 40.206349
| 104
| 0.680616
|
bb042d37ef8b7a2a287a7b72062d2223db1a6c13
| 648
|
py
|
Python
|
tests/test_matrix_props/test_is_square.py
|
paniash/toqito
|
ab67c2a3fca77b3827be11d1e79531042ea62b82
|
[
"MIT"
] | 76
|
2020-01-28T17:02:01.000Z
|
2022-02-14T18:02:15.000Z
|
tests/test_matrix_props/test_is_square.py
|
paniash/toqito
|
ab67c2a3fca77b3827be11d1e79531042ea62b82
|
[
"MIT"
] | 82
|
2020-05-31T20:09:38.000Z
|
2022-03-28T17:13:59.000Z
|
tests/test_matrix_props/test_is_square.py
|
paniash/toqito
|
ab67c2a3fca77b3827be11d1e79531042ea62b82
|
[
"MIT"
] | 30
|
2020-04-02T16:07:11.000Z
|
2022-02-05T13:39:22.000Z
|
"""Test is_square."""
import numpy as np
from toqito.matrix_props import is_square
def test_is_square():
"""Test that square matrix returns True."""
mat = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
np.testing.assert_equal(is_square(mat), True)
def test_is_not_square():
"""Test that non-square matrix returns False."""
mat = np.array([[1, 2, 3], [4, 5, 6]])
np.testing.assert_equal(is_square(mat), False)
def test_is_square_invalid():
"""Input must be a matrix."""
with np.testing.assert_raises(ValueError):
is_square(np.array([-1, 1]))
if __name__ == "__main__":
np.testing.run_module_suite()
| 24
| 53
| 0.646605
|
a8f1282c0a6fcfcdfa7a8e6bff3047858fa43481
| 739
|
py
|
Python
|
pytezos/__init__.py
|
juztin/pytezos-1
|
7e608ff599d934bdcf129e47db43dbdb8fef9027
|
[
"MIT"
] | 1
|
2021-05-20T16:52:08.000Z
|
2021-05-20T16:52:08.000Z
|
pytezos/__init__.py
|
juztin/pytezos-1
|
7e608ff599d934bdcf129e47db43dbdb8fef9027
|
[
"MIT"
] | 1
|
2020-12-30T16:44:56.000Z
|
2020-12-30T16:44:56.000Z
|
pytezos/__init__.py
|
juztin/pytezos-1
|
7e608ff599d934bdcf129e47db43dbdb8fef9027
|
[
"MIT"
] | null | null | null |
"""
Welcome to PyTezos!
To start playing with the Tezos blockchain you need to get a PyTezosClient instance.
Just type:
>>> from pytezos import pytezos
>>> pytezos
And follow the interactive documentation.
"""
from pytezos.rpc import RpcProvider, localhost, mainnet, babylonnet, zeronet
from pytezos.rpc.errors import *
from pytezos.crypto import Key
from pytezos.proto import Proto
from pytezos.michelson.contract import Contract
from pytezos.michelson.formatter import format_timestamp
from pytezos.client import PyTezosClient
from pytezos.operation.group import OperationGroup
from pytezos.michelson.interface import ContractInterface
from pytezos.standards.non_fungible_token import NonFungibleTokenImpl
pytezos = PyTezosClient()
| 29.56
| 84
| 0.830853
|
e7b2cffc2ff4cdac6e2211ed91b629da048220ec
| 4,501
|
py
|
Python
|
scripts/utils/HashPath.py
|
irblsensitivity/irblsensitivity
|
6ee1038d3daefdabc29c60a37dac627bc98498f9
|
[
"Apache-2.0"
] | 2
|
2019-06-08T14:19:55.000Z
|
2021-07-06T04:02:05.000Z
|
scripts/utils/HashPath.py
|
irblsensitivity/irblsensitivity
|
6ee1038d3daefdabc29c60a37dac627bc98498f9
|
[
"Apache-2.0"
] | null | null | null |
scripts/utils/HashPath.py
|
irblsensitivity/irblsensitivity
|
6ee1038d3daefdabc29c60a37dac627bc98498f9
|
[
"Apache-2.0"
] | 1
|
2017-05-29T14:03:20.000Z
|
2017-05-29T14:03:20.000Z
|
#-*- coding: utf-8 -*-
from __future__ import print_function
import urllib2
import hashlib
import os
###############################################################################################################
class HashPath(object):
@staticmethod
def sequence(_name, _name_size, _level):
'''
:param _name:
:param _name_size:
:param _level:
:return:
'''
# check _name_size
if _name_size <= 0:
return _name
bucket = u''
for i in range(0, _level):
sub = _name[(i*_name_size):((i+1)*_name_size)]
if sub == u'':
sub = u'0'*_name_size
bucket += sub +u'\\'
return bucket
@staticmethod
def encode(_path, _name_size, _level):
'''
create HASH path using last filename in give path
ex) /home/user/2d9e014f.txt
==> /home/user/2d/9e/01/2d9e014f.txt (when level==3, namesize==2)
==> /home/user/2d9/e01/2d9e014f.txt (when level==2, namesize==3)
:param _path:
:param _name_size:
:param _level:
:return:
'''
# check _name_size
if _name_size <= 0:
return _path
idx = _path.rfind(u'\\')
idx2 = _path.rfind(u'/')
if idx < idx2: idx = idx2
if idx == -1:
parent = ''
filename = _path[:]
else:
parent = _path[0:(idx+1)]
filename = _path[(idx+1):]
bucket = u''
for i in range(0, _level):
sub = filename[(i*_name_size):((i+1)*_name_size)]
if sub == u'':
sub = u'0' * _name_size
bucket += sub + os.path.sep
return parent + bucket + filename
@staticmethod
def split(_txt, _split, _count):
'''
Split the text the specified number of times; if _txt has no remaining delimiter, that part will be None.
:param _txt:
:param _split:
:param _count:
:return:
'''
result = ()
while _count > 0:
idx = _txt.find(_split)  # find() returns -1 when the delimiter is absent (index() would raise ValueError)
if idx >= 0:
part = _txt[(idx+1):]
_txt = _txt[0:idx]
else:
part = None
result += (part,)
_count -= 1
return result
@staticmethod
def url_to_path(_url, _only_host=False):
'''
make a relative path using url (domain\\path\\hashinfo_from_url)
:param _url:
:return:
'''
if _url is None:
return None
#make Hash Filename
m = hashlib.md5()
m.update(_url.encode('utf-8'))
filename = m.hexdigest()
#separate url and make a path (\\domain\\path\\)
req = urllib2.Request(_url)
host = req.get_host() # domain:port
host = host.replace(u':', u'#') # change port delimiter
if _only_host is False:
path = req.get_selector() # choose selector after domain.
path = path.split(u'?')[0] # remove parameter
path = path.replace(os.path.altsep, os.path.sep) # altsep --> sep
if path.startswith(os.path.sep) is True:
path = path[1:]
host = os.path.join(host, path)
filepath = os.path.join(host ,filename)
return filepath
@staticmethod
def expend_hashpath(_path, _level, _name_size):
'''
make a relative path (parent\\hash_path\\filename)
:param _url:
:return:
'''
if _path is None:
return None
#separate parent and filename
idx = _path.rfind(u'\\')
idx2 = _path.rfind(u'/')
if idx<idx2 : idx = idx2
if idx < 0: return None
#parent = os.path.abs(os.path.join(_path, os.pardir))
parent = _path[:(idx+1)]
filename = _path[(idx+1):]
#make Hash Filename
m = hashlib.md5()
m.update(filename)
hexfilename = m.hexdigest()
bucket = u''
for i in range(0, _level):
sub = hexfilename[(i*_name_size):((i+1)*_name_size)]
if sub == u'':
sub = u'0'*_name_size
bucket += sub + os.path.sep
return os.path.join(parent, bucket, filename)
@staticmethod
def reduce_hashpath(_path, _level, _name_size):
'''
remove a hash path (parent\\[hash_path\\]filename)
:param _url:
:return:
'''
if _path is None:
return None
#separate parent and filename
idx = _path.rfind(u'\\')
idx2 = _path.rfind(u'/')
if idx<idx2 : idx = idx2
filename = _path[(idx+1):]
parent = _path[:idx]
#reduce
while _level>0:
idx = parent.rfind(u'\\')
idx2 = parent.rfind(u'/')
if idx<idx2 : idx = idx2
parent = parent [:idx]
_level -= 1
return os.path.join(parent, filename)
###############################################################################################################
###############################################################################################################
###############################################################################################################
if __name__ == '__main__':
path = HashPath.expend_hashpath(u'/home/user/bug/temp/file.txt', 2, 2)
print(HashPath.reduce_hashpath(path, 2,2,))
| 24.069519
| 111
| 0.567207
|
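A small, self-contained sketch of the bucketing scheme that HashPath.encode documents above, written for Python 3 (the module itself targets Python 2 via urllib2). The helper name hash_bucket_path is illustrative only; it reproduces the documented behaviour of turning the leading characters of a filename into fixed-size directory levels.

import os

def hash_bucket_path(path, name_size, level):
    # Take the file name and insert `level` directories, each built from
    # `name_size` characters taken from the front of the name.
    if name_size <= 0:
        return path
    parent, filename = os.path.split(path)
    bucket = ''
    for i in range(level):
        sub = filename[i * name_size:(i + 1) * name_size]
        if sub == '':
            sub = '0' * name_size   # pad missing levels, as the original does
        bucket = os.path.join(bucket, sub)
    return os.path.join(parent, bucket, filename)

# /home/user/2d9e014f.txt -> /home/user/2d/9e/01/2d9e014f.txt (level=3, name_size=2)
print(hash_bucket_path('/home/user/2d9e014f.txt', 2, 3))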
dceaf7acc91f910adc48b28e57882b6570097f42
| 1,253
|
py
|
Python
|
domain/sportPlan.py
|
zhuxiyulu/sugar
|
c780618aa6493779cc869e984f8e38be9314e1b8
|
[
"Apache-2.0"
] | 2
|
2018-06-14T15:28:10.000Z
|
2019-01-11T07:11:32.000Z
|
domain/sportPlan.py
|
zhuxiyulu/sugar
|
c780618aa6493779cc869e984f8e38be9314e1b8
|
[
"Apache-2.0"
] | null | null | null |
domain/sportPlan.py
|
zhuxiyulu/sugar
|
c780618aa6493779cc869e984f8e38be9314e1b8
|
[
"Apache-2.0"
] | null | null | null |
from sqlalchemy import Column, Integer, String, DATETIME
from domain.database import Base
# SportPlan: exercise (sport) plan model
class SportPlan(Base):
# table name
__tablename__ = 'sportplan'
# table structure
userId = Column(Integer(), primary_key=True)
sport1 = Column(String())
sport2 = Column(String())
sport3 = Column(String())
sport4 = Column(String())
time1 = Column(String())
time2 = Column(String())
time3 = Column(String())
time4 = Column(String())
week1 = Column(String())
week2 = Column(String())
week3 = Column(String())
week4 = Column(String())
sportTime = Column(DATETIME())
def __repr__(self):
return "<SportPlan(userId='%s', sport1='%s', sport2='%s', sport3='%s', sport4='%s', time1='%s', time2='%s', time3='%s', time4='%s', week1='%s', week2='%s', week3='%s',week4='%s', sportTime='%s')>" % (
self.userId,
self.sport1,
self.sport2,
self.sport3,
self.sport4,
self.time1,
self.time2,
self.time3,
self.time4,
self.week1,
self.week2,
self.week3,
self.week4,
self.sportTime
)
| 29.833333
| 209
| 0.526736
|
8093b50db93868f19e02455b11df94d64fdd5155
| 1,225
|
py
|
Python
|
var/spack/repos/builtin/packages/r-conquer/package.py
|
renjithravindrankannath/spack
|
043b2cbb7c99d69a373f3ecbf35bc3b4638bcf85
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/r-conquer/package.py
|
renjithravindrankannath/spack
|
043b2cbb7c99d69a373f3ecbf35bc3b4638bcf85
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/r-conquer/package.py
|
renjithravindrankannath/spack
|
043b2cbb7c99d69a373f3ecbf35bc3b4638bcf85
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-02-08T20:37:20.000Z
|
2019-03-31T15:19:26.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class RConquer(RPackage):
"""Convolution-Type Smoothed Quantile Regression.
Fast and accurate convolution-type smoothed quantile regression.
Implemented using Barzilai-Borwein gradient descent with a Huber regression
warm start. Construct confidence intervals for regression coefficients
using multiplier bootstrap."""
cran = "conquer"
version('1.3.0', sha256='ac354e18c9ad6f41ed5200fad1c99fa5b124fc6fa5bba8f3434be2478f53d5fa')
version('1.2.1', sha256='1354f90f962a2124e155227cdc0ed2c6e54682f1e08934c49a827e51dc112f45')
version('1.0.2', sha256='542f6154ce1ffec0c1b4dd4e1f5b86545015f4b378c4c66a0840c65c57d674ff')
depends_on('r@3.5.0:', type=('build', 'run'))
depends_on('r-rcpp@1.0.3:', type=('build', 'run'))
depends_on('r-matrix', type=('build', 'run'))
depends_on('r-matrixstats', type=('build', 'run'))
depends_on('r-rcpparmadillo@0.9.850.1.0:', type=('build', 'run'))
depends_on('r-caret', type=('build', 'run'), when='@1.2.1')
| 40.833333
| 95
| 0.729796
|
03785f02832c9d201cd5e3ca4f50b84d58353a90
| 835
|
py
|
Python
|
src/oci/object_storage/transfer/internal/file_read_callback_stream.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/object_storage/transfer/internal/file_read_callback_stream.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/object_storage/transfer/internal/file_read_callback_stream.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
class FileReadCallbackStream:
def __init__(self, file, progress_callback):
self.progress_callback = progress_callback
self.file = file
self.mode = file.mode
# this is used by 'requests' to determine the Content-Length header using fstat
def fileno(self):
return self.file.fileno()
def read(self, n):
self.progress_callback(n)
return self.file.read(n)
def __getattr__(self, attr):
return getattr(self.file, attr)
| 37.954545
| 245
| 0.700599
|
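A hedged usage sketch for the wrapper above: it reports every read request to a progress callback while delegating everything else to the underlying file. The temporary file and the report callback are made up for illustration, and FileReadCallbackStream is assumed to be in scope (in the SDK it lives at oci.object_storage.transfer.internal.file_read_callback_stream).

import tempfile

# Illustrative progress callback: n is the number of bytes requested per read call.
def report(n):
    print('read request for', n, 'bytes')

# Create a small placeholder file so the sketch has something to stream.
with tempfile.NamedTemporaryFile(suffix='.bin', delete=False) as tmp:
    tmp.write(b'x' * 100_000)
    path = tmp.name

# Assumes FileReadCallbackStream (defined above) is importable in this scope.
with open(path, 'rb') as f:
    stream = FileReadCallbackStream(f, report)
    while stream.read(64 * 1024):
        pass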
a28bde0354ec2d1170984ce309d743dcd8ecb851
| 10,101
|
py
|
Python
|
packages/python-packages/doc-warden/warden/enforce_readme_content.py
|
scbedd/azure-sdk-tools
|
dcbe480642d53ea524438da39a3af758646b5bd3
|
[
"MIT"
] | 84
|
2015-01-01T23:40:15.000Z
|
2022-03-10T20:20:40.000Z
|
packages/python-packages/doc-warden/warden/enforce_readme_content.py
|
scbedd/azure-sdk-tools
|
dcbe480642d53ea524438da39a3af758646b5bd3
|
[
"MIT"
] | 2,219
|
2015-01-06T20:35:05.000Z
|
2022-03-31T23:36:52.000Z
|
packages/python-packages/doc-warden/warden/enforce_readme_content.py
|
scbedd/azure-sdk-tools
|
dcbe480642d53ea524438da39a3af758646b5bd3
|
[
"MIT"
] | 127
|
2015-01-02T01:43:51.000Z
|
2022-03-24T20:02:13.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from __future__ import print_function
import os
import markdown2
import bs4
import re
from .warden_common import check_match, walk_directory_for_pattern, get_omitted_files
from .HeaderConstruct import HeaderConstruct
from docutils import core
from docutils.writers.html4css1 import Writer,HTMLTranslator
import logging
README_PATTERNS = ['*/readme.md', '*/readme.rst', '*/README.md', '*/README.rst']
CODE_FENCE_REGEX = r"\`\`\`([\s\S\n]*?)\`\`\`"
# entry point
def verify_readme_content(config):
all_readmes = walk_directory_for_pattern(config.target_directory, README_PATTERNS, config)
omitted_readmes = get_omitted_files(config)
targeted_readmes = [readme for readme in all_readmes if readme not in omitted_readmes]
known_issue_paths = config.get_known_content_issues()
section_sorting_dict = config.required_readme_sections
ignored_missing_readme_paths = []
readme_results = []
readmes_with_issues = []
for readme in targeted_readmes:
ext = os.path.splitext(readme)[1]
if ext == '.rst':
readme_results.append(verify_rst_readme(readme, config, section_sorting_dict))
else:
readme_results.append(verify_md_readme(readme, config, section_sorting_dict))
for readme_tuple in readme_results:
if readme_tuple[1]:
if readme_tuple[0] in known_issue_paths:
ignored_missing_readme_paths.append(readme_tuple)
else:
readmes_with_issues.append(readme_tuple)
return readmes_with_issues, ignored_missing_readme_paths
# parse rst to html, check for presence of appropriate sections
def verify_rst_readme(readme, config, section_sorting_dict):
with open(readme, 'r', encoding="utf-8") as f:
readme_content = f.read()
html_readme_content = rst_to_html(readme_content)
html_soup = bs4.BeautifulSoup(html_readme_content, "html.parser")
missed_patterns = find_missed_sections(html_soup, config.required_readme_sections)
return (readme, missed_patterns)
# parse md to html, check for presence of appropriate sections
def verify_md_readme(readme, config, section_sorting_dict):
if config.verbose_output:
print('Examining content in {}'.format(readme))
with open(readme, 'r', encoding="utf-8-sig") as f:
readme_content = f.read()
# we need to sanitize to remove the fenced code blocks. The reasoning here is that markdown2 is having issues
# parsing the pygments style that we use with github.
sanitized_html_content = re.sub(CODE_FENCE_REGEX, "", readme_content, flags=re.MULTILINE)
html_readme_content = markdown2.markdown(sanitized_html_content)
html_soup = bs4.BeautifulSoup(html_readme_content, "html.parser")
missed_patterns = find_missed_sections(html_soup, config.required_readme_sections)
return (readme, missed_patterns)
# within the entire readme, are there any missing sections that are expected?
def find_missed_sections(html_soup, patterns):
header_list = html_soup.find_all(re.compile('^h[1-4]$'))
flattened_patterns = flatten_pattern_config(patterns)
header_index = generate_header_index(header_list, flattened_patterns)
observed_failing_patterns = recursive_header_search(header_index, patterns, [])
return observed_failing_patterns
# gets a distinct set of ALL patterns present in a config. This is
# important because this allows us to precalculate which patterns a given header tag will match
def flatten_pattern_config(patterns):
observed_patterns = []
for pattern in patterns:
if isinstance(pattern, dict):
parent_pattern, child_patterns = next(iter(pattern.items()))
if child_patterns:
observed_patterns.extend(flatten_pattern_config(child_patterns))
observed_patterns.extend([parent_pattern])
else:
observed_patterns.extend([pattern])
return list(set(observed_patterns))
# recursive solution that walks all the rules and generates rule chains from them to test
# that the tree actually contains sets of headers that meet the required sections
def recursive_header_search(header_index, patterns, parent_pattern_chain=[]):
unobserved_patterns = []
if patterns:
for pattern in patterns:
if isinstance(pattern, dict):
parent_pattern, child_patterns = next(iter(pattern.items()))
if not match_regex_to_headers(header_index, parent_pattern_chain + [parent_pattern]):
unobserved_patterns.append(parent_pattern_chain + [parent_pattern])
parent_chain_for_children = parent_pattern_chain + [parent_pattern]
unobserved_patterns.extend(recursive_header_search(header_index, child_patterns, parent_chain_for_children))
else:
if not match_regex_to_headers(header_index, parent_pattern_chain + [pattern]):
unobserved_patterns.append((parent_pattern_chain + [pattern]))
return unobserved_patterns
# a set of headers looks like this
# h1
#   h2
# h1
#   h2
#     h3
# h1
# any "indented" headers are children of the one above it IF the
# one above it is at a higher header level (this is actually < in comparison)
# result of above should be a web that looks like
# root
#   h1
#     h2
#   h1
#     h2
#       h3
#   h1
# this function examines a serial set of <h> tags and generates
# an index that allows us to interrogate a specific header for its containing
# headers.
def generate_header_index(header_constructs, patterns):
previous_header_level = 0
current_header = None
root = HeaderConstruct(None, None)
current_parent = root
header_index = []
previous_node_level = 0
for index, header in enumerate(header_constructs):
# evaluate the level
current_level = int(header.name.replace('h', ''))
# h1 < h2 == we need to traverse up
if current_level < current_parent.level:
current_parent = current_parent.get_parent_by_level(current_level)
current_header = HeaderConstruct(header, current_parent, patterns)
# h2 > h1 == we need to indent, add the current as a child, and set parent to current
# for the forthcoming headers
elif current_level > current_parent.level:
current_header = HeaderConstruct(header, current_parent, patterns)
# only set current_parent if there are children below, which NECESSITATES that
# the very next header must A) exist and B) be > current_level
if index + 1 < len(header_constructs):
if int(header_constructs[index+1].name.replace('h', '')) > current_level:
current_parent = current_header
# current_header.level == current_parent.level
# we just need to add it as a child to our current header
else:
if previous_node_level > current_parent.level:
current_parent = current_parent.get_parent_by_level(current_level)
current_header = HeaderConstruct(header, current_parent, patterns)
previous_node_level = current_level
# always add the header to the node index, we will use it later
header_index.append(current_header)
return header_index
# checks the node index for a specific pattern or chain
# [^Getting started$, Install Package] is an example of a required set
def match_regex_to_headers(header_index, target_patterns):
# we should only be firing this for a "leaf" aka the END of the chain we're looking for, so the last element
# will always get popped first before we recurse across the rest
current_target = target_patterns.pop()
matching_headers = [header for header in header_index if current_target in header.matching_patterns]
# check all the leaf node parents for the matches. we don't want to artificially constrain though
# so we have to assume that a rule can match multiple children
for matching_leaf_header in matching_headers:
if target_patterns:
result = check_header_parents(matching_leaf_header, target_patterns[:])
else:
return re.search(current_target, matching_leaf_header.get_tag_text())
if result:
return matching_leaf_header
else:
continue
return None
# recursively ensure that a header_construct has parents that match the required headers
# the search ALLOWS GAPS, so a match will still be found if
#
# h1
# h2 (matching header)
# h3 (unmatched parent header, but this is ok)
# h4 (matching header)
def check_header_parents(header_construct, required_parent_headers):
if required_parent_headers:
target_parent = required_parent_headers.pop()
new_parent = header_construct.check_parents_for_pattern(target_parent)
if new_parent:
if required_parent_headers:
return check_header_parents(header_construct, required_parent_headers)
else:
return True
else:
return False
else:
return False
# checks a header string against a set of configured patterns
def match_regex_set(header, patterns):
matching_patterns = []
for pattern in patterns:
result = re.search(pattern, header)
if result:
matching_patterns.append(pattern)
break
return matching_patterns
# boilerplate for translating RST
class HTMLFragmentTranslator(HTMLTranslator):
def __init__(self, document):
HTMLTranslator.__init__(self, document)
self.head_prefix = ['','','','','']
self.body_prefix = []
self.body_suffix = []
self.stylesheet = []
def astext(self):
return ''.join(self.body)
html_fragment_writer = Writer()
html_fragment_writer.translator_class = HTMLFragmentTranslator
# utilize boilerplate
def rst_to_html(input_rst):
return core.publish_string(input_rst, writer = html_fragment_writer)
| 39
| 124
| 0.711019
|
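The nested section configuration that the functions above walk is easiest to see with a concrete value. The config below is a made-up example (not the Azure SDK's real required_readme_sections), and flatten mirrors flatten_pattern_config above so the sketch runs on its own: plain strings are section regexes, and a one-key dict nests child section patterns under a parent pattern.

# Hypothetical required-sections config in the shape doc-warden expects.
required_readme_sections = [
    '^Getting started$',
    {'^Getting started$': ['Install(ing)? (the )?[Pp]ackage', 'Prerequisites']},
    'Key concepts',
    'Examples',
]

# Mirrors flatten_pattern_config above: collect the distinct set of every
# pattern, parent or child, that appears anywhere in the config.
def flatten(patterns):
    seen = []
    for pattern in patterns:
        if isinstance(pattern, dict):
            parent, children = next(iter(pattern.items()))
            if children:
                seen.extend(flatten(children))
            seen.append(parent)
        else:
            seen.append(pattern)
    return list(set(seen))

print(sorted(flatten(required_readme_sections)))
# ['Examples', 'Install(ing)? (the )?[Pp]ackage', 'Key concepts',
#  'Prerequisites', '^Getting started$']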
2a526c6d5f2afce2e736b2be4c2b36c36e364d12
| 238
|
py
|
Python
|
Desafio 96.py
|
MoomenEltelbany/PythonDesafios
|
aa2f44d3104cf3607f58dc42c2f8fc8023f128de
|
[
"MIT"
] | null | null | null |
Desafio 96.py
|
MoomenEltelbany/PythonDesafios
|
aa2f44d3104cf3607f58dc42c2f8fc8023f128de
|
[
"MIT"
] | null | null | null |
Desafio 96.py
|
MoomenEltelbany/PythonDesafios
|
aa2f44d3104cf3607f58dc42c2f8fc8023f128de
|
[
"MIT"
] | null | null | null |
def area(l, c):
print(f"A área de um terreno é {l} x {c} é de {l * c}m².")
print(f'Controle de Terreno.')
print(f'-' * 25)
larg = float(input('Largura (m): '))
comprimento = float(input('Comprimento (m): '))
area(larg, comprimento)
| 23.8
| 62
| 0.617647
|
72122ee5b599f9314849065e33f841c9cb092330
| 1,546
|
py
|
Python
|
ht/ht/urls.py
|
caoxuCarlos/htyw_ecust
|
90277333510de14d936153fa2b164efbc5d068b8
|
[
"MIT"
] | null | null | null |
ht/ht/urls.py
|
caoxuCarlos/htyw_ecust
|
90277333510de14d936153fa2b164efbc5d068b8
|
[
"MIT"
] | null | null | null |
ht/ht/urls.py
|
caoxuCarlos/htyw_ecust
|
90277333510de14d936153fa2b164efbc5d068b8
|
[
"MIT"
] | null | null | null |
"""ht URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from answers import views as answers_view
from main_page import views as main_page_view
from money_job import views as money_job_view
urlpatterns = [
path('admin/', admin.site.urls),
path('homepage/', include('main_page.urls')),
# hijack homepage url to answers page (prevent unauthorized bad information)
# path('homepage/', answers_view.ResourceListView.as_view(), name='home'),
path('tip/', include('tip.urls')),
path('airyslide/', include('airyslide.urls')),
path('answers/', answers_view.ResourceListView.as_view(), name='answers'),
path('contribute/', main_page_view.contribute, name='contribute'),
path('search/', main_page_view.search_function, name='search'),
path('jobs/', money_job_view.JobListView.as_view(), name='jobs'),
path('job_submit/', money_job_view.submit_job, name='job_submit'),
]
| 42.944444
| 80
| 0.718629
|
258f401238607a4b40a55c85c25ac5aa91257ebb
| 430
|
py
|
Python
|
tests/data/workflow-tests/actors/notscheduledfourthactor/actor.py
|
dhodovsk/leapp
|
bcd6580a19dabd132b3da8bcf2ed61fa8864ef18
|
[
"Apache-2.0"
] | 29
|
2019-05-29T05:34:52.000Z
|
2022-03-14T19:09:34.000Z
|
tests/data/workflow-tests/actors/notscheduledfourthactor/actor.py
|
dhodovsk/leapp
|
bcd6580a19dabd132b3da8bcf2ed61fa8864ef18
|
[
"Apache-2.0"
] | 373
|
2018-11-21T11:41:49.000Z
|
2022-03-31T11:40:56.000Z
|
tests/data/workflow-tests/actors/notscheduledfourthactor/actor.py
|
dhodovsk/leapp
|
bcd6580a19dabd132b3da8bcf2ed61fa8864ef18
|
[
"Apache-2.0"
] | 27
|
2018-11-26T17:14:15.000Z
|
2022-03-10T13:30:50.000Z
|
from leapp.actors import Actor
from leapp.tags import FourthPhaseTag
class NotScheduledFourthActor(Actor):
name = 'not_scheduled_fourth_actor'
description = 'No description has been provided for the not_scheduled_fourth_actor actor.'
consumes = ()
produces = ()
tags = (FourthPhaseTag,)
def process(self):
from leapp.libraries.common.test_helper import log_execution
log_execution(self)
| 28.666667
| 94
| 0.737209
|
65d20f2e9e1cbd4698cbacb2989d58fb47e5cc0f
| 8,169
|
py
|
Python
|
salt/modules/solaris_shadow.py
|
ifraixedes/saltstack-salt
|
b54becb8b43cc9b7c00b2c0bc637ac534dc62896
|
[
"Apache-2.0"
] | 9,425
|
2015-01-01T05:59:24.000Z
|
2022-03-31T20:44:05.000Z
|
salt/modules/solaris_shadow.py
|
ifraixedes/saltstack-salt
|
b54becb8b43cc9b7c00b2c0bc637ac534dc62896
|
[
"Apache-2.0"
] | 33,507
|
2015-01-01T00:19:56.000Z
|
2022-03-31T23:48:20.000Z
|
salt/modules/solaris_shadow.py
|
ifraixedes/saltstack-salt
|
b54becb8b43cc9b7c00b2c0bc637ac534dc62896
|
[
"Apache-2.0"
] | 5,810
|
2015-01-01T19:11:45.000Z
|
2022-03-31T02:37:20.000Z
|
"""
Manage the password database on Solaris systems
.. important::
If you feel that Salt should be using this module to manage passwords on a
minion, and it is using a different module (or gives an error similar to
*'shadow.info' is not available*), see :ref:`here
<module-provider-override>`.
"""
import os
import salt.utils.files
from salt.exceptions import CommandExecutionError
try:
import spwd
HAS_SPWD = True
except ImportError:
# SmartOS joyent_20130322T181205Z does not have spwd
HAS_SPWD = False
try:
import pwd
except ImportError:
pass # We're most likely on a Windows machine.
try:
import salt.utils.pycrypto
HAS_CRYPT = True
except ImportError:
HAS_CRYPT = False
# Define the module's virtual name
__virtualname__ = "shadow"
def __virtual__():
"""
Only work on POSIX-like systems
"""
if __grains__.get("kernel", "") == "SunOS":
return __virtualname__
return (
False,
"The solaris_shadow execution module failed to load: only available on Solaris"
" systems.",
)
def default_hash():
"""
Returns the default hash used for unset passwords
CLI Example:
.. code-block:: bash
salt '*' shadow.default_hash
"""
return "!"
def info(name):
"""
Return information for the specified user
CLI Example:
.. code-block:: bash
salt '*' shadow.info root
"""
if HAS_SPWD:
try:
data = spwd.getspnam(name)
ret = {
"name": data.sp_nam,
"passwd": data.sp_pwd,
"lstchg": data.sp_lstchg,
"min": data.sp_min,
"max": data.sp_max,
"warn": data.sp_warn,
"inact": data.sp_inact,
"expire": data.sp_expire,
}
except KeyError:
ret = {
"name": "",
"passwd": "",
"lstchg": "",
"min": "",
"max": "",
"warn": "",
"inact": "",
"expire": "",
}
return ret
# SmartOS joyent_20130322T181205Z does not have spwd, but not all is lost
# Return what we can know
ret = {
"name": "",
"passwd": "",
"lstchg": "",
"min": "",
"max": "",
"warn": "",
"inact": "",
"expire": "",
}
try:
data = pwd.getpwnam(name)
ret.update({"name": name})
except KeyError:
return ret
# To compensate for lack of spwd module, read in password hash from /etc/shadow
s_file = "/etc/shadow"
if not os.path.isfile(s_file):
return ret
with salt.utils.files.fopen(s_file, "r") as ifile:
for line in ifile:
comps = line.strip().split(":")
if comps[0] == name:
ret.update({"passwd": comps[1]})
# For SmartOS `passwd -s <username>` and the output format is:
# name status mm/dd/yy min max warn
#
# Fields:
# 1. Name: username
# 2. Status:
# - LK: locked
# - NL: no login
# - NP: No password
# - PS: Password
# 3. Last password change
# 4. Minimum age
# 5. Maximum age
# 6. Warning period
output = __salt__["cmd.run_all"]("passwd -s {}".format(name), python_shell=False)
if output["retcode"] != 0:
return ret
fields = output["stdout"].split()
if len(fields) == 2:
# For example:
# root NL
return ret
# We have all fields:
# buildbot L 05/09/2013 0 99999 7
ret.update(
{
"name": data.pw_name,
"lstchg": fields[2],
"min": int(fields[3]),
"max": int(fields[4]),
"warn": int(fields[5]),
"inact": "",
"expire": "",
}
)
return ret
def set_maxdays(name, maxdays):
"""
Set the maximum number of days during which a password is valid. See man
passwd.
CLI Example:
.. code-block:: bash
salt '*' shadow.set_maxdays username 90
"""
pre_info = info(name)
if maxdays == pre_info["max"]:
return True
cmd = "passwd -x {} {}".format(maxdays, name)
__salt__["cmd.run"](cmd, python_shell=False)
post_info = info(name)
if post_info["max"] != pre_info["max"]:
return post_info["max"] == maxdays
def set_mindays(name, mindays):
"""
Set the minimum number of days between password changes. See man passwd.
CLI Example:
.. code-block:: bash
salt '*' shadow.set_mindays username 7
"""
pre_info = info(name)
if mindays == pre_info["min"]:
return True
cmd = "passwd -n {} {}".format(mindays, name)
__salt__["cmd.run"](cmd, python_shell=False)
post_info = info(name)
if post_info["min"] != pre_info["min"]:
return post_info["min"] == mindays
return False
def gen_password(password, crypt_salt=None, algorithm="sha512"):
"""
.. versionadded:: 2015.8.8
Generate hashed password
.. note::
When this function is called directly via remote execution,
the password argument may be displayed in the system's process list.
This may be a security risk on certain systems.
password
Plaintext password to be hashed.
crypt_salt
Cryptographic salt. If not given, a random 8-character salt will be
generated.
algorithm
The following hash algorithms are supported:
* md5
* blowfish (not in mainline glibc, only available in distros that add it)
* sha256
* sha512 (default)
CLI Example:
.. code-block:: bash
salt '*' shadow.gen_password 'I_am_password'
salt '*' shadow.gen_password 'I_am_password' crypt_salt='I_am_salt' algorithm=sha256
"""
if not HAS_CRYPT:
raise CommandExecutionError(
"gen_password is not available on this operating system "
'because the "crypt" python module is not available.'
)
return salt.utils.pycrypto.gen_hash(crypt_salt, password, algorithm)
def del_password(name):
"""
.. versionadded:: 2015.8.8
Delete the password for the named user
CLI Example:
.. code-block:: bash
salt '*' shadow.del_password username
"""
cmd = "passwd -d {}".format(name)
__salt__["cmd.run"](cmd, python_shell=False, output_loglevel="quiet")
uinfo = info(name)
return not uinfo["passwd"]
def set_password(name, password):
"""
Set the password for a named user. The password must be a properly defined
hash; the password hash can be generated with this command:
``openssl passwd -1 <plaintext password>``
CLI Example:
.. code-block:: bash
salt '*' shadow.set_password root $1$UYCIxa628.9qXjpQCjM4a..
"""
s_file = "/etc/shadow"
ret = {}
if not os.path.isfile(s_file):
return ret
lines = []
with salt.utils.files.fopen(s_file, "r") as ifile:
for line in ifile:
comps = line.strip().split(":")
if comps[0] != name:
lines.append(line)
continue
comps[1] = password
line = ":".join(comps)
lines.append("{}\n".format(line))
with salt.utils.files.fopen(s_file, "w+") as ofile:
lines = [salt.utils.stringutils.to_str(_l) for _l in lines]
ofile.writelines(lines)
uinfo = info(name)
return uinfo["passwd"] == password
def set_warndays(name, warndays):
"""
Set the number of days of warning before a password change is required.
See man passwd.
CLI Example:
.. code-block:: bash
salt '*' shadow.set_warndays username 7
"""
pre_info = info(name)
if warndays == pre_info["warn"]:
return True
cmd = "passwd -w {} {}".format(warndays, name)
__salt__["cmd.run"](cmd, python_shell=False)
post_info = info(name)
if post_info["warn"] != pre_info["warn"]:
return post_info["warn"] == warndays
return False
| 24.981651
| 92
| 0.56543
|
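A standalone sketch of the `passwd -s` parsing step that info() performs above, using the sample lines quoted in its comments; the parsed dict mirrors the fields info() fills in but is not the module's API.

# Field layout documented above: name, status, last change, min age, max age, warn.
full_line = 'buildbot L 05/09/2013 0 99999 7'
short_line = 'root NL'   # status-only form: nothing beyond name and status to report

for line in (full_line, short_line):
    fields = line.split()
    if len(fields) == 2:
        parsed = {'name': fields[0], 'lstchg': '', 'min': '', 'max': '', 'warn': ''}
    else:
        parsed = {
            'name': fields[0],
            'lstchg': fields[2],
            'min': int(fields[3]),
            'max': int(fields[4]),
            'warn': int(fields[5]),
        }
    print(parsed)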
126f0d2031e1c77d504842b2d3bead682c8382cf
| 1,885
|
py
|
Python
|
emannotationschemas/schemas/functional_coregistration.py
|
seung-lab/EMAnnotationSchemas
|
ef1061ca78860d53f1cd180496e87ad685a32ffe
|
[
"MIT"
] | null | null | null |
emannotationschemas/schemas/functional_coregistration.py
|
seung-lab/EMAnnotationSchemas
|
ef1061ca78860d53f1cd180496e87ad685a32ffe
|
[
"MIT"
] | 18
|
2018-08-15T17:38:04.000Z
|
2022-02-18T02:08:28.000Z
|
emannotationschemas/schemas/functional_coregistration.py
|
fcollman/EMAnnotationSchemas
|
bcc9f308868b2ea3e1504089d9cbab878b626acc
|
[
"MIT"
] | 5
|
2018-08-14T22:39:49.000Z
|
2021-10-05T16:36:07.000Z
|
import marshmallow as mm
from emannotationschemas.schemas.base import AnnotationSchema, BoundSpatialPoint
class FunctionalCoregistration(AnnotationSchema):
pt = mm.fields.Nested(
BoundSpatialPoint,
required=True,
description="location of cell body of functional cell",
)
func_id = mm.fields.Int(required=True, description="functional cell ID")
class FunctionalUnitCoregistration(AnnotationSchema):
pt = mm.fields.Nested(
BoundSpatialPoint,
required=True,
description="location of cell body of functional cell",
)
session = mm.fields.Int(required=True, description="session ID of imaging")
scan_idx = mm.fields.Int(
required=True, description="index of the scan within the session"
)
unit_id = mm.fields.Int(
required=True, description="unique functional cell ID within the scan"
)
class FunctionalUnitCoregistrationExtended(AnnotationSchema):
pt = mm.fields.Nested(
BoundSpatialPoint,
required=True,
description="location of cell body of functional cell",
)
session = mm.fields.Int(required=True, description="session ID of imaging")
scan_idx = mm.fields.Int(
required=True, description="index of the scan within the session"
)
unit_id = mm.fields.Int(
required=True, description="unique functional cell ID within the scan"
)
nucleus_id = mm.fields.Int(required=True, description="matched id of nucleus")
field = mm.fields.Int(
required=False, description="index of imaging field of cell within the scan"
)
residual = mm.fields.Float(
required=False,
description="distance between nucleus centroid and functional centroid after transformation",
)
score = mm.fields.Float(
required=False, description="confidence score associated with match"
)
| 35.566038
| 101
| 0.698674
|
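A self-contained analogue of the schemas above showing how required nested and integer fields behave under marshmallow's load/validate. PointSchema stands in for BoundSpatialPoint, whose real fields live in emannotationschemas.schemas.base, so the field names here are illustrative only.

import marshmallow as mm

class PointSchema(mm.Schema):
    position = mm.fields.List(mm.fields.Float(), required=True)

class CoregSchema(mm.Schema):
    pt = mm.fields.Nested(PointSchema, required=True)
    session = mm.fields.Int(required=True)
    scan_idx = mm.fields.Int(required=True)
    unit_id = mm.fields.Int(required=True)

# A complete payload deserializes cleanly.
print(CoregSchema().load({
    'pt': {'position': [10.0, 20.0, 30.0]},
    'session': 4, 'scan_idx': 7, 'unit_id': 1234,
}))

# An incomplete payload reports the missing required fields.
print(CoregSchema().validate({'session': 4}))  # flags pt/scan_idx/unit_id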
3b9a0a328abdc6a69c0a4982c8e0b17d67fab54e
| 162
|
py
|
Python
|
data/groups.py
|
melipharo/stru-python19
|
6df494f6812e96d87bdfffdcbd3602e764a893c9
|
[
"BSD-2-Clause"
] | null | null | null |
data/groups.py
|
melipharo/stru-python19
|
6df494f6812e96d87bdfffdcbd3602e764a893c9
|
[
"BSD-2-Clause"
] | null | null | null |
data/groups.py
|
melipharo/stru-python19
|
6df494f6812e96d87bdfffdcbd3602e764a893c9
|
[
"BSD-2-Clause"
] | null | null | null |
from model import Group
test_data = [
Group(name="name1", header="header1", footer="footer1"),
Group(name="name2", header="header2", footer="footer2")
]
| 23.142857
| 60
| 0.67284
|
13da69071d7265dafb926eeee478ce2441ffa08f
| 3,179
|
py
|
Python
|
Multidimentional Lists Exerscise/08. Miner.py
|
nrgxtra/advanced
|
89f713419102c64a04b495f717cdb51bbf94213c
|
[
"MIT"
] | null | null | null |
Multidimentional Lists Exerscise/08. Miner.py
|
nrgxtra/advanced
|
89f713419102c64a04b495f717cdb51bbf94213c
|
[
"MIT"
] | null | null | null |
Multidimentional Lists Exerscise/08. Miner.py
|
nrgxtra/advanced
|
89f713419102c64a04b495f717cdb51bbf94213c
|
[
"MIT"
] | null | null | null |
rows_count = int(input())
field = []
commands = []
[commands.append(x) for x in input().split(' ')]
for _ in range(rows_count):
field.append([])
[field[-1].append(x) for x in input().split()]
coals = 0
current_coal = 0
for row in field:
sub_sum = row.count('c')
coals += sub_sum
initial_start = []
for i in range(len(field)):
for j in range(len(field)):
if field[i][j] == 's':
initial_start.append(i)
initial_start.append(j)
initial_row = initial_start[0]
initial_column = initial_start[1]
current_position = field[initial_row][initial_column]
row = initial_row
column = initial_column
end_or_mineall = False
for command in commands:
if command == 'left':
if column - 1 >= 0:
row = row
column = column-1
if field[row][column] == 'e':
print(f'Game over! ({row}, {column})')
end_or_mineall = True
break
elif field[row][column] == 'c':
current_coal += 1
field[row][column] = '*'
if current_coal == coals:
print(f"You collected all coals! ({row}, {column})")
end_or_mineall = True
break
elif command == 'right':
if column + 1 < len(field):
row = row
column = column + 1
if field[row][column] == 'e':
print(f'Game over! ({row}, {column})')
end_or_mineall = True
break
elif field[row][column] == 'c':
current_coal += 1
field[row][column] = '*'
if current_coal == coals:
print(f"You collected all coals! ({row}, {column})")
end_or_mineall = True
break
elif command == 'up':
if row - 1 >= 0:
row = row - 1
column = column
if field[row][column] == 'e':
print(f'Game over! ({row}, {column})')
end_or_mineall = True
break
elif field[row][column] == 'c':
current_coal += 1
field[row][column] = '*'
if current_coal == coals:
print(f"You collected all coals! ({row}, {column})")
end_or_mineall = True
break
elif command == 'down':
if row + 1 < len(field):
row = row + 1
column = column
if field[row][column] == 'e':
print(f'Game over! ({row}, {column})')
end_or_mineall = True
break
elif field[row][column] == 'c':
current_coal += 1
field[row][column] = '*'
if current_coal == coals:
print(f"You collected all coals! ({row}, {column})")
end_or_mineall = True
break
if not end_or_mineall:
coals_left = coals - current_coal
print(f"{coals_left} coals left. ({row}, {column})")
| 34.554348
| 73
| 0.459578
|
dfaf92562fae8edd342d6c523f9f3fc00f6fcea5
| 921
|
py
|
Python
|
src/app/api/crud.py
|
khemx3/esdapi
|
e5bfbef0c5884761887c27c94934495e33be6283
|
[
"MIT"
] | null | null | null |
src/app/api/crud.py
|
khemx3/esdapi
|
e5bfbef0c5884761887c27c94934495e33be6283
|
[
"MIT"
] | null | null | null |
src/app/api/crud.py
|
khemx3/esdapi
|
e5bfbef0c5884761887c27c94934495e33be6283
|
[
"MIT"
] | null | null | null |
from app.api.models import BookSchema
from app.db import books, database
async def post(payload: BookSchema):
query = books.insert().values(title=payload.title, description=payload.description, price=payload.price)
return await database.execute(query=query)
async def get(id: int):
query = books.select().where(id == books.c.id)
return await database.fetch_one(query=query)
async def get_all():
query = books.select()
return await database.fetch_all(query=query)
async def put(id: int, payload: BookSchema):
query = (
books
.update()
.where(id == books.c.id)
.values(title=payload.title, description=payload.description, price=payload.price)
.returning(books.c.id)
)
return await database.execute(query=query)
async def delete(id: int):
query = books.delete().where(id == books.c.id)
return await database.execute(query=query)
| 27.088235
| 108
| 0.690554
|
f80464b6cf8c4b85731c80081eea878f076601ba
| 1,603
|
py
|
Python
|
website/utils/ml_utils.py
|
TOXiNdeep2503/makeabilitylabwebsite
|
6e57ee4f928742b30f1e933166dc8fa2b6b11d05
|
[
"MIT"
] | 3
|
2020-10-01T20:44:24.000Z
|
2022-01-20T05:51:24.000Z
|
website/utils/ml_utils.py
|
TOXiNdeep2503/makeabilitylabwebsite
|
6e57ee4f928742b30f1e933166dc8fa2b6b11d05
|
[
"MIT"
] | 76
|
2020-07-17T18:23:29.000Z
|
2022-03-18T12:16:56.000Z
|
website/utils/ml_utils.py
|
TOXiNdeep2503/makeabilitylabwebsite
|
6e57ee4f928742b30f1e933166dc8fa2b6b11d05
|
[
"MIT"
] | 99
|
2020-09-30T13:32:25.000Z
|
2022-03-07T07:51:54.000Z
|
"""
Hosts general utility functions for Makeability Lab Django website
"""
import re
# helper function to correctly capitalize a string, specify words to not capitalize in the articles list
# from: https://stackoverflow.com/a/3729957
# Note: this code was written by J. Gilkeson and needs to be cleaned up (and/or removed if no longer needed)
def capitalize_title(s, exceptions):
word_list = re.split(' ', s) # re.split behaves as expected
final = [word_list[0].capitalize()]
for word in word_list[1:]:
final.append(word if word in exceptions else word.capitalize())
return " ".join(final)
#Standard list of words to not capitalize in a sentence
articles = ['a', 'an', 'and', 'as', 'at', 'but', 'by', 'for', 'from', 'is', 'of', 'on', 'or', 'nor', 'the', 'to', 'up', 'yet']
def get_video_embed(video_url):
"""Returns proper embed code for a video url"""
if 'youtu.be' in video_url or 'youtube.com' in video_url:
# https://youtu.be/i0IDbHGir-8 or https://www.youtube.com/watch?v=i0IDbHGir-8
base_url = "https://youtube.com/embed"
unique_url = video_url[video_url.find("/", 9):]
# See https://developers.google.com/youtube/youtube_player_demo for details on parameterizing YouTube video
return base_url + unique_url + "?showinfo=0&iv_load_policy=3"
elif 'vimeo' in video_url:
# https://player.vimeo.com/video/164630179
vimeo_video_id = video_url.rsplit('/', 1)[-1]
return "https://player.vimeo.com/video/" + vimeo_video_id
else:
return "unknown video service for '{}'".format(video_url)
| 45.8
| 126
| 0.672489
|
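A quick check of get_video_embed, assuming the function above is in scope; the URLs are the same example forms quoted in its comments, plus one unknown host to show the fallback message.

# Assumes get_video_embed from the module above is importable in this scope.
print(get_video_embed('https://youtu.be/i0IDbHGir-8'))
# -> https://youtube.com/embed/i0IDbHGir-8?showinfo=0&iv_load_policy=3
print(get_video_embed('https://player.vimeo.com/video/164630179'))
# -> https://player.vimeo.com/video/164630179
print(get_video_embed('https://example.com/clip/42'))
# -> unknown video service for 'https://example.com/clip/42'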
06568f074a993b42a33371e64e2104c70b9987c2
| 6,947
|
py
|
Python
|
keystone/controllers.py
|
yanheven/keystone
|
417b8941095f40674575ed951b4a03ebcdc91fef
|
[
"Apache-2.0"
] | null | null | null |
keystone/controllers.py
|
yanheven/keystone
|
417b8941095f40674575ed951b4a03ebcdc91fef
|
[
"Apache-2.0"
] | null | null | null |
keystone/controllers.py
|
yanheven/keystone
|
417b8941095f40674575ed951b4a03ebcdc91fef
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from oslo_serialization import jsonutils
import webob
from keystone.common import extension
from keystone.common import json_home
from keystone.common import wsgi
from keystone import exception
LOG = log.getLogger(__name__)
MEDIA_TYPE_JSON = 'application/vnd.openstack.identity-%s+json'
_VERSIONS = []
# NOTE(blk-u): latest_app will be set by keystone.service.loadapp(). It gets
# set to the application that was just loaded. In the case of keystone-all,
# loadapp() gets called twice, once for the public app and once for the admin
# app. In the case of httpd/keystone, loadapp() gets called once for the public
# app if this is the public instance or loadapp() gets called for the admin app
# if it's the admin instance.
# This is used to fetch the /v3 JSON Home response. The /v3 JSON Home response
# is the same whether it's the admin or public service so either admin or
# public works.
latest_app = None
def request_v3_json_home(new_prefix):
if 'v3' not in _VERSIONS:
# No V3 support, so return an empty JSON Home document.
return {'resources': {}}
req = webob.Request.blank(
'/v3', headers={'Accept': 'application/json-home'})
v3_json_home_str = req.get_response(latest_app).body
v3_json_home = jsonutils.loads(v3_json_home_str)
json_home.translate_urls(v3_json_home, new_prefix)
return v3_json_home
class Extensions(wsgi.Application):
"""Base extensions controller to be extended by public and admin API's."""
# extend in subclass to specify the set of extensions
@property
def extensions(self):
return None
def get_extensions_info(self, context):
return {'extensions': {'values': self.extensions.values()}}
def get_extension_info(self, context, extension_alias):
try:
return {'extension': self.extensions[extension_alias]}
except KeyError:
raise exception.NotFound(target=extension_alias)
class AdminExtensions(Extensions):
@property
def extensions(self):
return extension.ADMIN_EXTENSIONS
class PublicExtensions(Extensions):
@property
def extensions(self):
return extension.PUBLIC_EXTENSIONS
def register_version(version):
_VERSIONS.append(version)
class MimeTypes(object):
JSON = 'application/json'
JSON_HOME = 'application/json-home'
def v3_mime_type_best_match(context):
# accept_header is a WebOb MIMEAccept object so supports best_match.
accept_header = context['accept_header']
if not accept_header:
return MimeTypes.JSON
SUPPORTED_TYPES = [MimeTypes.JSON, MimeTypes.JSON_HOME]
return accept_header.best_match(SUPPORTED_TYPES)
class Version(wsgi.Application):
def __init__(self, version_type, routers=None):
self.endpoint_url_type = version_type
self._routers = routers
super(Version, self).__init__()
def _get_identity_url(self, context, version):
"""Returns a URL to keystone's own endpoint."""
url = self.base_url(context, self.endpoint_url_type)
return '%s/%s/' % (url, version)
def _get_versions_list(self, context):
"""The list of versions is dependent on the context."""
versions = {}
if 'v2.0' in _VERSIONS:
versions['v2.0'] = {
'id': 'v2.0',
'status': 'stable',
'updated': '2014-04-17T00:00:00Z',
'links': [
{
'rel': 'self',
'href': self._get_identity_url(context, 'v2.0'),
}, {
'rel': 'describedby',
'type': 'text/html',
'href': 'http://docs.openstack.org/'
}
],
'media-types': [
{
'base': 'application/json',
'type': MEDIA_TYPE_JSON % 'v2.0'
}
]
}
if 'v3' in _VERSIONS:
versions['v3'] = {
'id': 'v3.4',
'status': 'stable',
'updated': '2015-03-30T00:00:00Z',
'links': [
{
'rel': 'self',
'href': self._get_identity_url(context, 'v3'),
}
],
'media-types': [
{
'base': 'application/json',
'type': MEDIA_TYPE_JSON % 'v3'
}
]
}
return versions
def get_versions(self, context):
req_mime_type = v3_mime_type_best_match(context)
if req_mime_type == MimeTypes.JSON_HOME:
v3_json_home = request_v3_json_home('/v3')
return wsgi.render_response(
body=v3_json_home,
headers=(('Content-Type', MimeTypes.JSON_HOME),))
versions = self._get_versions_list(context)
return wsgi.render_response(status=(300, 'Multiple Choices'), body={
'versions': {
'values': versions.values()
}
})
def get_version_v2(self, context):
versions = self._get_versions_list(context)
if 'v2.0' in _VERSIONS:
return wsgi.render_response(body={
'version': versions['v2.0']
})
else:
raise exception.VersionNotFound(version='v2.0')
def _get_json_home_v3(self):
def all_resources():
for router in self._routers:
for resource in router.v3_resources:
yield resource
return {
'resources': dict(all_resources())
}
def get_version_v3(self, context):
versions = self._get_versions_list(context)
if 'v3' in _VERSIONS:
req_mime_type = v3_mime_type_best_match(context)
if req_mime_type == MimeTypes.JSON_HOME:
return wsgi.render_response(
body=self._get_json_home_v3(),
headers=(('Content-Type', MimeTypes.JSON_HOME),))
return wsgi.render_response(body={
'version': versions['v3']
})
else:
raise exception.VersionNotFound(version='v3')
| 31.721461
| 79
| 0.594933
|
edc439c62c137d285f73e4a09a0e203c8fad3aeb
| 839
|
py
|
Python
|
tests/test_robotFinder.py
|
JDownloader/GEL-3014_Design3
|
dea01245592c97f73e6a78426270d91ade5e25c7
|
[
"MIT"
] | null | null | null |
tests/test_robotFinder.py
|
JDownloader/GEL-3014_Design3
|
dea01245592c97f73e6a78426270d91ade5e25c7
|
[
"MIT"
] | null | null | null |
tests/test_robotFinder.py
|
JDownloader/GEL-3014_Design3
|
dea01245592c97f73e6a78426270d91ade5e25c7
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from baseStation.robotIPFinder import RobotFinder
class TestRobotFinder(TestCase):
ROBOT_MAC = RobotFinder.ROBOT_MAC
FAKE_ROBOT_IP = "192.168.0.10"
lines_of_arp_exit = "? (10.248.0.1) at 0:0:c:7:ac:a on en1 ifscope [ethernet]\n"+ \
"? (" + FAKE_ROBOT_IP + ") at " + ROBOT_MAC + " on en1 ifscope [ethernet]\n" + \
"? (169.254.255.255) at 0:0:c:7:ac:a on en1 [ethernet]"
def pass_function(self):
pass
def setUp(self):
self.robot_finder = RobotFinder(self.pass_function)
def test_attempt_find(self):
self.robot_finder._attempt_find()
self.assertTrue(True)
def test_attempt_parse_real(self):
ip = self.robot_finder._parse_answer(self.lines_of_arp_exit)
self.assertEqual(ip, self.FAKE_ROBOT_IP)
| 34.958333
| 104
| 0.644815
|
bbe02cd48b72926242ced350bafa69474c2fbd21
| 1,631
|
py
|
Python
|
igrfcoord/plots.py
|
scivision/igrfcoord
|
f50946eccbd4ab587ea7182d521d1ebd1f2cb229
|
[
"BSD-2-Clause"
] | 2
|
2016-04-01T18:03:13.000Z
|
2016-04-02T19:35:32.000Z
|
igrfcoord/plots.py
|
scivision/igrfcoord
|
f50946eccbd4ab587ea7182d521d1ebd1f2cb229
|
[
"BSD-2-Clause"
] | null | null | null |
igrfcoord/plots.py
|
scivision/igrfcoord
|
f50946eccbd4ab587ea7182d521d1ebd1f2cb229
|
[
"BSD-2-Clause"
] | 3
|
2020-08-04T12:51:11.000Z
|
2021-11-01T09:19:45.000Z
|
import logging
import pandas
try:
from matplotlib.pyplot import figure
import cartopy.crs as ccrs
except (ImportError, RuntimeError) as e:
logging.error(f"plotting disabled {e}")
figure = None
def _sitecol(line: pandas.Series) -> str:
if line.name == "HST":
c = "red"
elif line.name == "PFISR":
c = "blue"
else:
c = "black"
return c
def plotgeomag(latlon: pandas.DataFrame):
if figure is None:
return
ax = figure().gca()
for _, c in latlon.iterrows():
ax.scatter(
c.at["mlon"], c.at["mlat"], s=180, facecolors="none",
)
# edgecolors=_sitecol(l))
ax.set_xlabel("magnetic longitude [deg.]")
ax.set_ylabel("magnetic latitude [deg.]")
ax.grid(True)
ax.set_title("Geomagnetic")
# for _,c in latlon.iterrows():
# ax.text(c.at['mlon'], c.at['mlat'], c.site.item(),
# ha='center', va='center', fontsize=8)
ax.get_xaxis().get_major_formatter().set_useOffset(False)
ax.get_yaxis().get_major_formatter().set_useOffset(False)
# %% geographic
if latlon.shape[0] < 2:
return
ax = figure().gca(projection=ccrs.PlateCarree())
ax.stock_img()
for _, c in latlon.iterrows():
ax.scatter(
c.at["glon"],
c.at["glat"],
s=180,
facecolors="none",
# edgecolors=_sitecol(l),
transform=ccrs.Geodetic(),
)
ax.set_extent(
(latlon.loc[:, "glon"].min(), latlon.loc[:, "glon"].max(), latlon.loc[:, "glat"].min(), latlon.loc[:, "glat"].max())
)
| 25.484375
| 124
| 0.556714
|
5ea0c76e88ae91603a3e4c900c085fbf5fa94716
| 8,857
|
py
|
Python
|
act/plotting/WindRoseDisplay.py
|
ajsockol/ACT
|
976002b50bade6cf1ad3d86ffabe3aed9d6e0bbb
|
[
"BSD-3-Clause"
] | null | null | null |
act/plotting/WindRoseDisplay.py
|
ajsockol/ACT
|
976002b50bade6cf1ad3d86ffabe3aed9d6e0bbb
|
[
"BSD-3-Clause"
] | null | null | null |
act/plotting/WindRoseDisplay.py
|
ajsockol/ACT
|
976002b50bade6cf1ad3d86ffabe3aed9d6e0bbb
|
[
"BSD-3-Clause"
] | null | null | null |
"""
act.plotting.WindRoseDisplay
----------------------------
Stores the class for WindRoseDisplay.
"""
import matplotlib.pyplot as plt
import numpy as np
import warnings
from .plot import Display
# Import Local Libs
from ..utils import datetime_utils as dt_utils
class WindRoseDisplay(Display):
"""
A class for handling wind rose plots.
This is inherited from the :func:`act.plotting.Display`
class and therefore has the same attributes as that class.
See :func:`act.plotting.Display`
for more information. There are no additional attributes or parameters
to this class.
Examples
--------
To create a WindRoseDisplay object, simply do:
.. code-block :: python
sonde_ds = act.io.armfiles.read_netcdf('sonde_data.nc')
WindDisplay = act.plotting.WindRoseDisplay(sonde_ds, figsize=(8,10))
"""
def __init__(self, obj, subplot_shape=(1,), ds_name=None, **kwargs):
super().__init__(obj, subplot_shape, ds_name,
subplot_kw=dict(projection='polar'), **kwargs)
def set_thetarng(self, trng=(0., 360.), subplot_index=(0,)):
"""
Sets the theta range of the wind rose plot.
Parameters
----------
trng : 2-tuple
The range (in degrees).
subplot_index : 2-tuple
The index of the subplot to set the degree range of.
"""
if self.axes is not None:
self.axes[subplot_index].set_thetamin(trng[0])
self.axes[subplot_index].set_thetamax(trng[1])
self.trng = trng
else:
raise RuntimeError(("Axes must be initialized before" +
" changing limits!"))
print(self.trng)
def set_rrng(self, rrng, subplot_index=(0,)):
"""
Sets the range of the radius of the wind rose plot.
Parameters
----------
rrng : 2-tuple
The range for the plot radius (in %).
subplot_index : 2-tuple
The index of the subplot to set the radius range of.
"""
if self.axes is not None:
self.axes[subplot_index].set_rmin(rrng[0])
self.axes[subplot_index].set_rmax(rrng[1])
self.rrng = rrng
else:
raise RuntimeError(("Axes must be initialized before" +
" changing limits!"))
def plot(self, dir_field, spd_field, dsname=None, subplot_index=(0,),
cmap=None, set_title=None, num_dirs=20, spd_bins=None,
tick_interval=3, legend_loc=0, legend_bbox=None, legend_title=None,
calm_threshold=1.,
**kwargs):
"""
Makes the wind rose plot from the given dataset.
Parameters
----------
dir_field : str
The name of the field representing the wind direction (in degrees).
spd_field : str
The name of the field representing the wind speed.
dsname : str
The name of the datastream to plot from. Set to None to
let ACT automatically try to determine this.
subplot_index : 2-tuple
The index of the subplot to place the plot on.
cmap : str or matplotlib colormap
The name of the matplotlib colormap to use.
set_title : str
The title of the plot.
num_dirs : int
The number of directions to split the wind rose into.
spd_bins : 1D array-like
The bin boundaries to sort the wind speeds into.
tick_interval : int
The interval (in %) for the ticks on the radial axis.
legend_loc : int
Legend location using matplotlib legend code
legend_bbox : tuple
Legend bounding box coordinates
legend_title : string
Legend title
calm_threshold : float
Winds below this threshold are considered to be calm.
**kwargs : keyword arguments
Additional keyword arguments will be passed into :func:plt.bar
Returns
-------
ax : matplotlib axis handle
The matplotlib axis handle corresponding to the plot.
"""
if dsname is None and len(self._arm.keys()) > 1:
raise ValueError(("You must choose a datastream when there are 2 "
"or more datasets in the TimeSeriesDisplay "
"object."))
elif dsname is None:
dsname = list(self._arm.keys())[0]
# Get data and dimensions
dir_data = self._arm[dsname][dir_field].values
spd_data = self._arm[dsname][spd_field].values
# Get the current plotting axis, add day/night background and plot data
if self.fig is None:
self.fig = plt.figure()
if self.axes is None:
self.axes = np.array([plt.axes(projection='polar')])
self.fig.add_axes(self.axes[0])
if spd_bins is None:
spd_bins = np.linspace(0, np.nanmax(spd_data), 10)
# Make the bins so that 0 degrees N is in the center of the first bin
# We need to wrap around
deg_width = 360. / num_dirs
dir_bins_mid = np.linspace(0., 360. - 3 * deg_width / 2., num_dirs)
wind_hist = np.zeros((num_dirs, len(spd_bins) - 1))
for i in range(num_dirs):
if i == 0:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "invalid value encountered in.*")
the_range = np.logical_or(dir_data < deg_width / 2.,
dir_data > 360. - deg_width / 2.)
else:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "invalid value encountered in.*")
the_range = np.logical_and(
dir_data >= dir_bins_mid[i] - deg_width / 2,
dir_data <= dir_bins_mid[i] + deg_width / 2)
hist, bins = np.histogram(spd_data[the_range], spd_bins)
wind_hist[i] = hist
wind_hist = wind_hist / np.sum(wind_hist) * 100
mins = np.deg2rad(dir_bins_mid)
# Do the first level
if 'units' in self._arm[dsname][spd_field].attrs.keys():
units = self._arm[dsname][spd_field].attrs['units']
else:
units = ''
the_label = ("%3.1f" % spd_bins[0] +
'-' + "%3.1f" % spd_bins[1] + " " + units)
our_cmap = plt.cm.get_cmap(cmap)
our_colors = our_cmap(np.linspace(0, 1, len(spd_bins)))
bars = [self.axes[subplot_index].bar(mins, wind_hist[:, 0],
bottom=0,
label=the_label,
width=0.8 * np.deg2rad(deg_width),
color=our_colors[0],
**kwargs)]
for i in range(1, len(spd_bins) - 1):
the_label = ("%3.1f" % spd_bins[i] +
'-' + "%3.1f" % spd_bins[i + 1] + " " + units)
# Changing the bottom to be a sum of the previous speeds so that
# it positions it correctly - Adam Theisen
bars.append(self.axes[subplot_index].bar(
mins, wind_hist[:, i], label=the_label,
bottom=np.sum(wind_hist[:, :i], axis=1), width=0.8 * np.deg2rad(deg_width),
color=our_colors[i], **kwargs))
self.axes[subplot_index].legend(loc=legend_loc, bbox_to_anchor=legend_bbox,
title=legend_title)
self.axes[subplot_index].set_theta_zero_location("N")
self.axes[subplot_index].set_theta_direction(-1)
# Add an annulus with text stating % of time calm
pct_calm = np.sum(spd_data <= calm_threshold) / len(spd_data) * 100
self.axes[subplot_index].set_rorigin(-2.5)
self.axes[subplot_index].annotate("%3.2f%%\n calm" % pct_calm, xy=(0, -2.5), ha='center', va='center')
# Set the ticks to be nice numbers
tick_max = tick_interval * round(
np.nanmax(np.cumsum(wind_hist, axis=1)) / tick_interval)
rticks = np.arange(0, tick_max, tick_interval)
rticklabels = [("%d" % x + '%') for x in rticks]
self.axes[subplot_index].set_rticks(rticks)
self.axes[subplot_index].set_yticklabels(rticklabels)
# Set Title
if set_title is None:
set_title = ' '.join([dsname, 'on',
dt_utils.numpy_to_arm_date(
self._arm[dsname].time.values[0])])
self.axes[subplot_index].set_title(set_title)
return self.axes[subplot_index]
| 39.190265
| 110
| 0.557638
|
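The wrap-around handling for the first direction sector in plot() above is the subtle part of the binning, so here is a standalone numpy sketch of just that membership test; the sample directions are invented.

import numpy as np

num_dirs = 8
deg_width = 360.0 / num_dirs                      # 45-degree sectors
dir_data = np.array([2.0, 44.0, 200.0, 350.0, 359.0])

# Sector centred on 0 degrees (north): directions just above 0 OR just below 360.
north_sector = np.logical_or(dir_data < deg_width / 2.0,
                             dir_data > 360.0 - deg_width / 2.0)
print(dir_data[north_sector])                     # [  2. 350. 359.]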
b5b58b1d72358047f66d72c261487ded7e143d68
| 283
|
py
|
Python
|
template/{{cookiecutter.pkg_name}}/setup.py
|
azazdeaz/ros-grpc-wrapper
|
8438ed371630294f3845cd238958103041f233de
|
[
"MIT"
] | 18
|
2020-09-10T06:03:32.000Z
|
2022-03-14T14:05:17.000Z
|
template/{{cookiecutter.pkg_name}}/setup.py
|
azazdeaz/ros-grpc-wrapper
|
8438ed371630294f3845cd238958103041f233de
|
[
"MIT"
] | 1
|
2021-08-24T09:41:42.000Z
|
2021-08-24T09:41:42.000Z
|
template/{{cookiecutter.pkg_name}}/setup.py
|
azazdeaz/ros-grpc-wrapper
|
8438ed371630294f3845cd238958103041f233de
|
[
"MIT"
] | 3
|
2021-02-21T04:19:21.000Z
|
2021-11-12T06:24:37.000Z
|
from setuptools import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
packages=['ros_pb2', 'ros_pb2_grpc', '{{cookiecutter.pkg_name}}'],
package_dir={'': 'src'})
setup(**setup_args)
| 28.3
| 70
| 0.763251
|
92baa11d0b9d0dc768f74909952c316cc3314100
| 308
|
py
|
Python
|
leetcode/medium/top-k-frequent-elements.py
|
vtemian/interviews-prep
|
ddef96b5ecc699a590376a892a804c143fe18034
|
[
"Apache-2.0"
] | 8
|
2019-05-14T12:50:29.000Z
|
2022-03-01T09:08:27.000Z
|
leetcode/medium/top-k-frequent-elements.py
|
vtemian/interviews-prep
|
ddef96b5ecc699a590376a892a804c143fe18034
|
[
"Apache-2.0"
] | 46
|
2019-03-24T20:59:29.000Z
|
2019-04-09T16:28:43.000Z
|
leetcode/medium/top-k-frequent-elements.py
|
vtemian/interviews-prep
|
ddef96b5ecc699a590376a892a804c143fe18034
|
[
"Apache-2.0"
] | 1
|
2022-01-28T12:46:29.000Z
|
2022-01-28T12:46:29.000Z
|
import heapq
from collections import Counter
class Solution:
def topKFrequent(self, nums: List[int], k: int) -> List[int]:
nums = Counter(nums)
return [
x[0]
for x in heapq.nlargest(k, [(nr, value) for nr, value in nums.items()], key=lambda nr: nr[1])
]
| 25.666667
| 105
| 0.577922
|
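The same Counter-plus-heapq idea as the solution above, pulled out of the LeetCode class wrapper so it runs on its own; the function name is illustrative. Counter.most_common(k) would be an equivalent one-liner, but heapq.nlargest makes the "largest count" criterion explicit.

import heapq
from collections import Counter

def top_k_frequent(nums, k):
    counts = Counter(nums)
    # Take the k (value, count) pairs with the largest count, keep only the values.
    return [value for value, _ in heapq.nlargest(k, counts.items(), key=lambda item: item[1])]

print(top_k_frequent([1, 1, 1, 2, 2, 3], 2))   # [1, 2]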
52582496addf1038f8f747b268df5c1077cc6d66
| 252
|
py
|
Python
|
manage.py
|
openwater/h2o-really
|
bb6ae678cc4f505450684a2579e3f0196236e8dc
|
[
"Unlicense"
] | 3
|
2015-05-25T07:41:42.000Z
|
2020-05-18T05:50:40.000Z
|
manage.py
|
openwater/h2o-really
|
bb6ae678cc4f505450684a2579e3f0196236e8dc
|
[
"Unlicense"
] | null | null | null |
manage.py
|
openwater/h2o-really
|
bb6ae678cc4f505450684a2579e3f0196236e8dc
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "openwater.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 22.909091
| 73
| 0.77381
|
a1f08465cddd3aa48517bd3532a1ab111ba0d0be
| 50
|
py
|
Python
|
kolga/plugins/exception.py
|
Hi-Fi/kolga
|
821169fc24eb7e3883e6b4993ac75117a0c59766
|
[
"MIT"
] | null | null | null |
kolga/plugins/exception.py
|
Hi-Fi/kolga
|
821169fc24eb7e3883e6b4993ac75117a0c59766
|
[
"MIT"
] | null | null | null |
kolga/plugins/exception.py
|
Hi-Fi/kolga
|
821169fc24eb7e3883e6b4993ac75117a0c59766
|
[
"MIT"
] | null | null | null |
class TestCouldNotLoadPlugin(Exception):
pass
| 16.666667
| 40
| 0.8
|
8f088e8f72f684f3807e6c50987f9c23cb262889
| 957
|
py
|
Python
|
azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2018_09_01/models/replication_paged.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2018_09_01/models/replication_paged.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2018_09_01/models/replication_paged.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2018-08-28T14:36:47.000Z
|
2018-08-28T14:36:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class ReplicationPaged(Paged):
"""
    A paging container for iterating over a list of :class:`Replication <azure.mgmt.containerregistry.v2018_09_01.models.Replication>` objects
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Replication]'}
}
def __init__(self, *args, **kwargs):
super(ReplicationPaged, self).__init__(*args, **kwargs)
| 34.178571
| 141
| 0.583072
|
57bcbfb0954db2c91ec2ed22506d36d008fe3876
| 29,958
|
py
|
Python
|
cdpcli/clidriver.py
|
anuragpatro/cdpcli
|
fe2b78308e4f8c09aa9609a43a646f314fa20327
|
[
"Apache-2.0"
] | 7
|
2020-10-01T14:03:57.000Z
|
2022-02-23T17:47:08.000Z
|
cdpcli/clidriver.py
|
anuragpatro/cdpcli
|
fe2b78308e4f8c09aa9609a43a646f314fa20327
|
[
"Apache-2.0"
] | 1
|
2020-11-30T08:00:50.000Z
|
2020-12-01T08:00:20.000Z
|
cdpcli/clidriver.py
|
anuragpatro/cdpcli
|
fe2b78308e4f8c09aa9609a43a646f314fa20327
|
[
"Apache-2.0"
] | 7
|
2020-11-07T20:55:47.000Z
|
2021-11-09T18:45:33.000Z
|
# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Modifications made by Cloudera are:
# Copyright (c) 2016 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
import platform
import socket
import sys
from cdpcli import LIST_TYPE
from cdpcli import RELEASE
from cdpcli import VERSION
from cdpcli import xform_name
from cdpcli.argparser import ArgTableArgParser
from cdpcli.argparser import MainArgParser
from cdpcli.argparser import ServiceArgParser
from cdpcli.argprocess import unpack_argument
from cdpcli.arguments import BooleanArgument
from cdpcli.arguments import CLIArgument
from cdpcli.arguments import CustomArgument
from cdpcli.arguments import ListArgument
from cdpcli.arguments import UnknownArgumentError
from cdpcli.clicommand import CLICommand
from cdpcli.client import ClientCreator
from cdpcli.client import Context
from cdpcli.compat import copy_kwargs
from cdpcli.compat import OrderedDict
from cdpcli.compat import six
from cdpcli.config import Config
from cdpcli.endpoint import EndpointCreator
from cdpcli.endpoint import EndpointResolver
from cdpcli.exceptions import ExtensionImportError
from cdpcli.exceptions import InvalidConfiguredFormFactor
from cdpcli.exceptions import ProfileNotFound
from cdpcli.exceptions import WrongOpFormFactorError
from cdpcli.exceptions import WrongSvcFormFactorError
from cdpcli.extensions.arguments import OverrideRequiredArgsArgument
from cdpcli.extensions.cliinputjson import add_cli_input_json
from cdpcli.extensions.configure.classify import ClassifyDeployment
from cdpcli.extensions.configure.classify import DeploymentType
from cdpcli.extensions.configure.configure import ConfigureCommand
from cdpcli.extensions.generatecliskeleton import add_generate_skeleton
from cdpcli.extensions.interactivelogin import LoginCommand
from cdpcli.extensions.logout import LogoutCommand
from cdpcli.extensions.paginate import add_pagination_params
from cdpcli.extensions.paginate import check_should_enable_pagination
from cdpcli.extensions.refdoc import RefdocCommand
from cdpcli.formatter import get_formatter
from cdpcli.help import OperationHelpCommand
from cdpcli.help import ProviderHelpCommand
from cdpcli.help import ServiceHelpCommand
from cdpcli.loader import Loader
from cdpcli.model import ServiceModel
from cdpcli.paramfile import ParamFileVisitor
from cdpcli.parser import ResponseParserFactory
from cdpcli.retryhandler import create_retry_handler
from cdpcli.translate import build_retry_config
from cdpcli.utils import get_extension_registers
import urllib3.util.connection as urllib3_connection
LOG = logging.getLogger('cdpcli.clidriver')
ROOT_LOGGER = logging.getLogger('')
LOG_FORMAT = ('%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s')
def main():
driver = CLIDriver()
return driver.main()
class CLIDriver(object):
def __init__(self):
self._loader = Loader()
self._endpoint_creator = EndpointCreator(EndpointResolver())
self._user_agent_header = self._build_user_agent_header()
self._response_parser_factory = ResponseParserFactory()
self._cli_data = self._loader.load_json('cli.json')
self._retryhandler = self._create_default_retryhandler()
self._available_services = self._loader.list_available_services()
self._command_table = self._build_command_table()
self._argument_table = self._build_argument_table()
self._context = Context()
self._client_creator = ClientCreator(self._loader,
self._context,
self._endpoint_creator,
self._user_agent_header,
self._response_parser_factory,
self._retryhandler)
def main(self, args=None):
if args is None:
args = sys.argv[1:]
parser = self._create_parser()
command_table = self._get_command_table()
if len(args) == 0 or (len(args) == 1 and args[0] == '--help'):
args = ['help']
parsed_args, remaining = parser.parse_known_args(args)
try:
self._handle_top_level_args(parsed_args)
self._filter_command_table_for_form_factor(parsed_args)
self._warn_for_old_python()
self._warn_for_non_public_release()
return command_table[parsed_args.command](
self._client_creator, remaining, parsed_args)
except Exception as e:
LOG.debug("Exception caught in main()", exc_info=True)
sys.stderr.write("\n")
sys.stderr.write("%s\n" % six.text_type(e))
return 255
def _get_loader(self):
return self._loader
def _get_cli_data(self):
return self._cli_data
def _get_command_table(self):
return self._command_table
def _build_user_agent_header(self):
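        # For example (illustrative values only), this produces a header such as
        # 'CDPCLI/0.9.50 Python/3.8.10 Linux/5.4.0-90-generic'.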
return 'CDPCLI/%s Python/%s %s/%s' % (VERSION,
platform.python_version(),
platform.system(),
platform.release())
def _build_command_table(self):
commands = OrderedDict()
services = self._get_available_services()
for service_name in services:
commands[service_name] = ServiceCommand(self, service_name)
ConfigureCommand.add_command(commands)
LoginCommand.add_command(commands)
LogoutCommand.add_command(commands)
RefdocCommand.add_command(commands)
commands = OrderedDict(sorted(commands.items()))
return commands
def _filter_command_table_for_form_factor(self, parsed_args):
"""
Replaces services and operations in the command table that do not apply
to the current form factor with stubs that error out when called.
"""
if parsed_args.command == 'refdoc':
# Do not filter out any command if it is to generate help documents.
return
# Find the form factor based on:
# 1. the form factor explicitly specified by --form-factor, or else
# 2. the configured form factor, or else
# 3. the explicit endpoint URL, or else
# 4. the configured CDP endpoint URL.
if parsed_args.form_factor:
form_factor = parsed_args.form_factor
else:
try:
form_factor = self._context.get_scoped_config().get('form_factor', None)
except ProfileNotFound:
form_factor = None
valid_form_factors = [dt.value for dt in list(DeploymentType)]
if form_factor and form_factor not in valid_form_factors:
raise InvalidConfiguredFormFactor(
form_factor=form_factor,
valid_form_factors=valid_form_factors)
if not form_factor:
endpoint_url = parsed_args.endpoint_url
if not endpoint_url:
try:
endpoint_url = self._context.get_scoped_config(). \
get(EndpointResolver.CDP_ENDPOINT_URL_KEY_NAME, None)
except ProfileNotFound:
endpoint_url = None
form_factor = \
ClassifyDeployment(endpoint_url).get_deployment_type().value
LOG.debug("Current form factor is {}".format(form_factor))
for command in list(self._command_table.keys()):
try:
# If the service does not apply to the current form factor,
# filter it out.
service_model = self._command_table[command].service_model
service_form_factors = service_model.form_factors
if form_factor not in service_form_factors:
self._command_table[command] = \
FilteredServiceCommand(self, command, form_factor,
service_form_factors)
else:
for operation_name in service_model.operation_names:
# If the operation does not apply to the current form
# factor, filter it out.
operation_model = service_model.operation_model(operation_name)
operation_form_factors = operation_model.form_factors
if not operation_form_factors:
operation_form_factors = service_form_factors
if form_factor not in operation_form_factors:
self._command_table[command]. \
filter_operation(operation_name, form_factor,
operation_form_factors)
except AttributeError:
# not a service model, so available in all form factors
pass
def _get_argument_table(self):
return self._argument_table
def _build_argument_table(self):
argument_table = OrderedDict()
cli_data = self._get_cli_data()
cli_arguments = cli_data.get('options', None)
for option in cli_arguments:
option_params = copy_kwargs(cli_arguments[option])
cli_argument = self._create_cli_argument(option, option_params)
cli_argument.add_to_arg_table(argument_table)
return argument_table
def _get_available_services(self):
return self._available_services
def get_service_model(self, service_name):
service_data = self._loader.load_service_data(service_name)
service_data['paths'] = OrderedDict(sorted(service_data.get('paths', {}).items()))
return ServiceModel(service_data, service_name=service_name)
def _create_help_command(self):
cli_data = self._get_cli_data()
# We filter service aliases out of the service list at the bottom of the
# top level help.
commands = OrderedDict()
for service_name, command in self._get_command_table().items():
if not self._loader.is_service_alias(service_name):
commands[service_name] = command
return ProviderHelpCommand(commands,
self._get_argument_table(),
cli_data.get('description', None),
cli_data.get('synopsis', None),
cli_data.get('help_usage', None))
def _create_parser(self):
command_table = self._get_command_table()
command_table['help'] = self._create_help_command()
cli_data = self._get_cli_data()
parser = MainArgParser(
command_table,
VERSION,
cli_data.get('description', None),
self._get_argument_table())
return parser
def _create_cli_argument(self, option_name, option_params):
return CustomArgument(
option_name,
help_text=option_params.get('help', ''),
dest=option_params.get('dest'),
default=option_params.get('default'),
action=option_params.get('action'),
required=option_params.get('required'),
choices=option_params.get('choices'),
cli_type_name=option_params.get('type'),
hidden=option_params.get('hidden', False))
def _handle_top_level_args(self, args):
if args.profile:
self._client_creator.context.set_config_variable('profile',
args.profile)
if args.auth_config:
self._client_creator.context.set_config_variable('auth_config',
args.auth_config)
if args.debug:
self._setup_logger(logging.DEBUG)
LOG.debug("CLI version: %s", self._user_agent_header)
LOG.debug("Arguments entered to CLI: %s", sys.argv[1:])
else:
self._setup_logger(logging.WARNING)
if args.force_ipv4:
# Based on SO /a/46972341
LOG.debug("Forcing IPv4 connections only")
def _allowed_gai_family():
return socket.AF_INET
urllib3_connection.allowed_gai_family = _allowed_gai_family
def _setup_logger(self, log_level):
ROOT_LOGGER.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(log_level)
formatter = logging.Formatter(LOG_FORMAT)
ch.setFormatter(formatter)
ROOT_LOGGER.addHandler(ch)
def _create_default_retryhandler(self):
# We create one retryhandler based on the __default__ configuration in
# the _retry.json (in the 'data' directory). This retryhandler is used
# by all services.
config = self._load_retry_config()
if not config:
return
LOG.info("Using retry config: %s" % config)
return create_retry_handler(config)
def _load_retry_config(self):
original_config = self._loader.load_json('_retry.json')
retry_config = build_retry_config(
original_config['retry'],
original_config.get('definitions', {}))
return retry_config
def _warn_for_old_python(self):
if sys.version_info[0] < 3 or \
(sys.version_info[0] == 3 and sys.version_info[1] < 6):
LOG.warn('You are running the CDP CLI under Python %s. The CDP CLI '
'now requires Python 3.6 or higher. Please upgrade now to '
'avoid CLI errors.', sys.version)
def _warn_for_non_public_release(self):
if RELEASE != 'PUBLIC':
if RELEASE == 'INTERNAL':
article = 'an'
else:
article = 'a'
LOG.warn('You are running {0} {1} release of the CDP CLI, which '
'has different capabilities from the standard public '
'release. Find the public release at: '
'https://pypi.org/project/cdpcli/'.format(article, RELEASE))
class ServiceCommand(CLICommand):
"""
A top-level CLI command, corresponding to an API service.
"""
def __init__(self, clidriver, name):
self._clidriver = clidriver
self._name = name
self._command_table = None
self._lineage = [self]
self._service_model = None
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def service_model(self):
return self._get_service_model()
@property
def lineage(self):
return self._lineage
@lineage.setter
def lineage(self, value):
self._lineage = value
def _get_command_table(self):
if self._command_table is None:
self._command_table = self._create_command_table()
return self._command_table
def _get_service_model(self):
if self._service_model is None:
self._service_model = self._clidriver.get_service_model(self._name)
return self._service_model
def __call__(self, client_creator, args, parsed_globals):
# Once we know we're trying to call a service for this operation
# we can go ahead and create the parser for it. We
# can also grab the Service object from botocore.
service_parser = self._create_parser()
parsed_args, remaining = service_parser.parse_known_args(args)
command_table = self._get_command_table()
return command_table[parsed_args.operation](
client_creator, remaining, parsed_globals)
def _create_command_table(self):
command_table = OrderedDict()
service_model = self._get_service_model()
for operation_name in service_model.operation_names:
cli_name = xform_name(operation_name, '-')
operation_model = service_model.operation_model(operation_name)
command_table[cli_name] = ServiceOperation(
name=cli_name,
parent_name=self._name,
operation_model=operation_model,
operation_caller=CLIOperationCaller())
register_ext, register_cmd = get_extension_registers(self._name)
if register_cmd is not None:
register_cmd(service_model, command_table)
self._add_lineage(command_table)
return command_table
def filter_operation(self, operation_name, form_factor, operation_form_factors):
"""
Replace the named operation in this command's command table with a
filtered one.
"""
command_table = self._get_command_table()
cli_name = xform_name(operation_name, '-')
command_table[cli_name] = FilteredServiceOperation(
name=cli_name,
parent_name=self._name,
form_factor=form_factor,
operation_form_factors=operation_form_factors)
def _add_lineage(self, command_table):
for command in command_table:
command_obj = command_table[command]
command_obj.lineage = self.lineage + [command_obj]
def create_help_command(self):
command_table = OrderedDict()
for command_name, command in self._get_command_table().items():
command_table[command_name] = command
return ServiceHelpCommand(obj=self._get_service_model(),
command_table=command_table,
arg_table=None,
command_lineage='.'.join(self.lineage_names),
name=self._name)
def _create_parser(self):
command_table = self._get_command_table()
# Also add a 'help' command.
command_table['help'] = self.create_help_command()
return ServiceArgParser(
operations_table=command_table, service_name=self._name)
class FilteredServiceCommand(ServiceCommand):
"""
A stub service command that fails when run due to being under the wrong
CLI form factor.
"""
def __init__(self, clidriver, name, form_factor, service_form_factors):
super().__init__(clidriver, name)
self._clidriver = clidriver
self._name = name
self._form_factor = form_factor
self._service_form_factors = service_form_factors
def __call__(self, client_creator, args, parsed_globals):
raise WrongSvcFormFactorError(
service_name=self._name,
form_factor=self._form_factor,
service_form_factors=', '.join(self._service_form_factors))
class ServiceOperation(object):
ARG_TYPES = {
LIST_TYPE: ListArgument,
'boolean': BooleanArgument,
}
DEFAULT_ARG_CLASS = CLIArgument
def __init__(self, name, parent_name, operation_caller, operation_model):
self._arg_table = None
self._name = name
        # This is used so we can figure out what the proper event
        # name should be: <parent name>.<name>.
self._parent_name = parent_name
        # We can have more than one operation caller. They are called in
        # order, and if any returns False, no other callers will be called.
self._operation_callers = [operation_caller]
self._lineage = [self]
self._operation_model = operation_model
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def lineage(self):
return self._lineage
@lineage.setter
def lineage(self, value):
self._lineage = value
@property
def lineage_names(self):
# Represents the lineage of a command in terms of command ``name``
return [cmd.name for cmd in self.lineage]
@property
def arg_table(self):
if self._arg_table is None:
self._arg_table = self._create_argument_table()
return self._arg_table
def __call__(self, client_creator, args, parsed_globals):
# Handle extensions first, so OverrideRequiredArgs (CliInputJson,
# GenerateCliSkeleton, etc) could have a chance to run.
self._handle_extensions()
# We need to handle overriding required arguments before we create
# the parser as the parser will parse the arguments and decide which
# argument is required before we have a chance to modify the argument
# table.
self._handle_override_required_args(args)
# Once we know we're trying to call a particular operation
# of a service we can go ahead and load the parameters.
operation_parser = self._create_operation_parser(self.arg_table)
self._add_help(operation_parser)
parsed_args, remaining = operation_parser.parse_known_args(args)
if parsed_args.help == 'help':
return self.create_help_command()(
client_creator, remaining, parsed_globals)
elif parsed_args.help:
remaining.append(parsed_args.help)
if remaining:
raise UnknownArgumentError(
"Unknown options: %s" % ', '.join(remaining))
check_should_enable_pagination(self._arg_table,
self._operation_model,
parsed_args,
parsed_globals)
call_parameters = self._build_call_parameters(parsed_args,
self.arg_table)
return self._invoke_operation_callers(client_creator,
call_parameters,
parsed_args,
parsed_globals)
def create_help_command(self):
return OperationHelpCommand(
operation_model=self._operation_model,
arg_table=self.arg_table,
name=self._name,
command_lineage='.'.join(self.lineage_names))
def _add_help(self, parser):
# The 'help' output is processed a little differently from
# the operation help because the arg_table has
# CLIArguments for values.
parser.add_argument('help', nargs='?')
def _build_call_parameters(self, args, arg_table):
# We need to convert the args specified on the command
# line as valid **kwargs we can hand to botocore.
service_params = {}
# args is an argparse.Namespace object so we're using vars()
# so we can iterate over the parsed key/values.
parsed_args = vars(args)
for arg_object in arg_table.values():
py_name = arg_object.py_name
if py_name in parsed_args:
value = parsed_args[py_name]
value = unpack_argument(arg_object, value)
arg_object.add_to_params(service_params, value)
# We run the ParamFileVisitor over the input data to resolve any
# paramfile references in it.
service_params = ParamFileVisitor().visit(
service_params, self._operation_model.input_shape)
return service_params
def _create_argument_table(self):
argument_table = OrderedDict()
input_shape = self._operation_model.input_shape
required_arguments = []
arg_dict = {}
if input_shape is not None:
required_arguments = input_shape.required_members
arg_dict = input_shape.members
for arg_name, arg_shape in arg_dict.items():
cli_arg_name = xform_name(arg_name, '-')
arg_class = self.ARG_TYPES.get(arg_shape.type_name,
self.DEFAULT_ARG_CLASS)
is_required = arg_name in required_arguments
arg_object = arg_class(
name=cli_arg_name,
argument_model=arg_shape,
is_required=is_required,
operation_model=self._operation_model,
serialized_name=arg_name,
no_paramfile=arg_shape.is_no_paramfile)
arg_object.add_to_arg_table(argument_table)
add_pagination_params(self._operation_model, argument_table)
add_cli_input_json(self._operation_model, argument_table)
add_generate_skeleton(self._operation_model, argument_table)
return argument_table
def _create_operation_parser(self, arg_table):
return ArgTableArgParser(arg_table, service_name=self._parent_name,
operation_name=self._name)
def _handle_override_required_args(self, args):
argument_table = self.arg_table
for cli_name, cli_argument in argument_table.items():
if isinstance(cli_argument, OverrideRequiredArgsArgument):
cli_argument.override_required_args(argument_table, args)
self._operation_callers.insert(0, cli_argument)
def _handle_extensions(self):
if self._operation_model.extensions:
# Iterate in reversed order to keep the execution order:
# First extension should run first.
for ext_name in reversed(self._operation_model.extensions):
register_ext, register_cmd = get_extension_registers(ext_name)
if register_ext is None:
raise ExtensionImportError(ext_name=ext_name, err='Not Found')
register_ext(self._operation_callers, self._operation_model)
def _invoke_operation_callers(self,
client_creator,
call_parameters,
parsed_args,
parsed_globals):
def _create_client(service_name):
# The TLS verification value can be a boolean or a CA_BUNDLE path. This
# is a little odd, but ultimately comes from the python HTTP requests
# library we're using.
tls_verification = parsed_globals.verify_tls
ca_bundle = getattr(parsed_globals, 'ca_bundle', None)
if parsed_globals.verify_tls and ca_bundle is not None:
tls_verification = ca_bundle
# Retrieve values passed for extra client configuration.
config_kwargs = {}
if parsed_globals.read_timeout is not None:
config_kwargs['read_timeout'] = int(parsed_globals.read_timeout)
if parsed_globals.connect_timeout is not None:
config_kwargs['connect_timeout'] = int(parsed_globals.connect_timeout)
config = Config(**config_kwargs)
client = client_creator.create_client(
service_name,
parsed_globals.endpoint_url,
parsed_globals.cdp_region,
tls_verification,
client_creator.context.get_credentials(parsed_globals),
client_config=config)
return client
for operation_caller in self._operation_callers:
# Create a new client for each operation_caller because parsed_args and
# parsed_globals could be changed in each iteration.
if operation_caller.invoke(
_create_client,
self._operation_model,
call_parameters,
parsed_args,
parsed_globals) is False:
break
return 0
class FilteredServiceOperation(ServiceOperation):
"""
A stub service operation that fails when run due to being under the wrong
CLI form factor.
"""
def __init__(self, name, parent_name, form_factor, operation_form_factors):
super().__init__(name, parent_name, operation_caller=None, operation_model=None)
self._form_factor = form_factor
self._operation_form_factors = operation_form_factors
def __call__(self, client_creator, args, parsed_globals):
raise WrongOpFormFactorError(
operation_name=self._name,
service_name=self._parent_name,
form_factor=self._form_factor,
operation_form_factors=', '.join(self._operation_form_factors))
class CLIOperationCaller(object):
def invoke(self,
client_creator,
operation_model,
parameters,
parsed_args,
parsed_globals):
service_name = operation_model.service_model.service_name
operation_name = operation_model.name
client = client_creator(service_name)
py_operation_name = xform_name(operation_name)
if client.can_paginate(py_operation_name) and parsed_globals.paginate:
response = client.get_paginator(
py_operation_name).paginate(**parameters)
else:
response = getattr(client, xform_name(operation_name))(**parameters)
self._display_response(operation_name, response, parsed_globals)
return True
def _display_response(self, command_name, response, parsed_globals):
output = parsed_globals.output
if output is None:
output = "json"
formatter = get_formatter(output, parsed_globals)
formatter(command_name, response)
| 41.899301
| 90
| 0.639095
|
cba2c2d7ab58347d99533c90c4f4701c1db568b3
| 8,834
|
py
|
Python
|
docs/source/conf.py
|
bhumikapahariapuresoftware/django-otp
|
462908b0e5a930a9b1a7227b1410cfa0e70b2194
|
[
"BSD-2-Clause"
] | null | null | null |
docs/source/conf.py
|
bhumikapahariapuresoftware/django-otp
|
462908b0e5a930a9b1a7227b1410cfa0e70b2194
|
[
"BSD-2-Clause"
] | null | null | null |
docs/source/conf.py
|
bhumikapahariapuresoftware/django-otp
|
462908b0e5a930a9b1a7227b1410cfa0e70b2194
|
[
"BSD-2-Clause"
] | null | null | null |
# django-otp documentation build configuration file, created by
# sphinx-quickstart on Fri Jul 13 09:48:33 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os.path
import sys
import django
import django.conf
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../ext'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'otpdocs',
]
# autodoc and viewcode need valid settings in order to process Django modules.
django.conf.settings.configure(
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
INSTALLED_APPS=[
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django_otp',
'django_otp.plugins.otp_hotp',
'django_otp.plugins.otp_totp',
'django_otp.plugins.otp_static',
'django_otp.plugins.otp_email',
],
SECRET_KEY='properly-configured',
)
django.setup()
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'django': ('https://docs.djangoproject.com/en/2.2/',
'https://docs.djangoproject.com/en/2.2/_objects/'),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'django-otp'
copyright = '2012, Peter Sagerson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = '1.1.3'
# The short X.Y version.
version = '.'.join(release.split('.')[:2])
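# For example, with release = '1.1.3' as above, version becomes '1.1'.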
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-otpdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-otp.tex', 'django-otp Documentation',
'Peter Sagerson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-otp', 'django-otp Documentation',
['Peter Sagerson'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-otp', 'django-otp Documentation',
'Peter Sagerson', 'django-otp', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 31.105634
| 80
| 0.696966
|
fb20f279cc5b89c2e208647f19bcd16143c30ac0
| 1,051
|
py
|
Python
|
jesse/models/Ticker.py
|
clarkandrew/jesse
|
169f3c85c465ef6604d42bd27c28612365309b4a
|
[
"MIT"
] | 3,999
|
2018-11-09T10:38:51.000Z
|
2022-03-31T12:29:12.000Z
|
jesse/models/Ticker.py
|
clarkandrew/jesse
|
169f3c85c465ef6604d42bd27c28612365309b4a
|
[
"MIT"
] | 172
|
2020-04-16T16:19:08.000Z
|
2022-03-28T13:28:55.000Z
|
jesse/models/Ticker.py
|
pmondal08/jesse
|
527952a74bc76f76cf3a2d25755386f8db285885
|
[
"MIT"
] | 495
|
2019-03-01T21:48:53.000Z
|
2022-03-30T15:35:19.000Z
|
import peewee
import jesse.helpers as jh
from jesse.services.db import db
class Ticker(peewee.Model):
id = peewee.UUIDField(primary_key=True)
# timestamp in milliseconds
timestamp = peewee.BigIntegerField()
# the latest trades price
last_price = peewee.FloatField()
# the trading volume in the last 24 hours
volume = peewee.FloatField()
# the highest price in the last 24 hours
high_price = peewee.FloatField()
# the lowest price in the last 24 hours
low_price = peewee.FloatField()
symbol = peewee.CharField()
exchange = peewee.CharField()
class Meta:
database = db
indexes = ((('timestamp', 'exchange', 'symbol'), True),)
def __init__(self, attributes: dict = None, **kwargs) -> None:
peewee.Model.__init__(self, attributes=attributes, **kwargs)
if attributes is None:
attributes = {}
for a in attributes:
setattr(self, a, attributes[a])
if not jh.is_unit_testing():
# create the table
Ticker.create_table()
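# A minimal usage sketch, assuming the database configured in jesse.services.db is
# reachable; the symbol, exchange, and numeric values below are illustrative only.
if __name__ == '__main__':
    import uuid
    Ticker.create(id=uuid.uuid4(), timestamp=1609459200000, last_price=29000.0,
                  volume=1250.5, high_price=29500.0, low_price=28000.0,
                  symbol='BTC-USDT', exchange='Binance')
    latest = (Ticker.select()
              .where(Ticker.symbol == 'BTC-USDT', Ticker.exchange == 'Binance')
              .order_by(Ticker.timestamp.desc())
              .first())
    print(latest.last_price if latest else None)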
| 26.948718
| 68
| 0.656518
|
b9a1baf1762d57ebf63d69df413c59d1f4e182e2
| 16,983
|
py
|
Python
|
scripts/artifacts/powerlogGZ.py
|
xperylabhub/iLEAPP
|
fd1b301bf2094387f51ccdbd10ed233ce9abd687
|
[
"MIT"
] | null | null | null |
scripts/artifacts/powerlogGZ.py
|
xperylabhub/iLEAPP
|
fd1b301bf2094387f51ccdbd10ed233ce9abd687
|
[
"MIT"
] | 1
|
2021-01-16T05:32:40.000Z
|
2021-01-16T05:32:40.000Z
|
scripts/artifacts/powerlogGZ.py
|
xperylabhub/iLEAPP
|
fd1b301bf2094387f51ccdbd10ed233ce9abd687
|
[
"MIT"
] | null | null | null |
import gzip
import re
import os
import shutil
import sqlite3
from pathlib import Path
import scripts.artifacts.artGlobals
from packaging import version
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, logdevinfo, timeline, tsv, is_platform_windows, open_sqlite_db_readonly
def get_powerlogGZ(files_found, report_folder, seeker):
iOSversion = scripts.artifacts.artGlobals.versionf
data_list1 = []
data_list2 = []
data_list3 = []
data_list4 = []
data_list5 = []
data_list6 = []
data_list7 = []
data_list8 = []
data_list9 = []
data_list10 = []
data_list11 = []
data_list12 = []
data_list13 = []
data_list14 = []
data_list15 = []
data_list16 = []
data_list17 = []
data_list18 = []
logfunc('Unzipped Powerlog databases:')
for file_found in files_found:
file_found = str(file_found)
filename = Path(file_found)
ungzipedfile= Path(filename.parent,filename.stem)
with gzip.open(file_found, 'rb') as f_in:
with open(ungzipedfile, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
f_out.close()
logfunc(str(filename.stem))
file_found = str(Path(filename.parent))
db = open_sqlite_db_readonly(str(ungzipedfile))
cursor = db.cursor()
if version.parse(iOSversion) >= version.parse("9"):
cursor.execute('''
select
datetime(timestamp, 'unixepoch'),
datetime(timestamplogged, 'unixepoch'),
applicationname,
assertionid,
assertionname,
audioroute,
mirroringstate,
operation,
pid
from
plaudioagent_eventpoint_audioapp
''')
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
for row in all_rows:
data_list1.append((row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8]))
else:
logfunc('No data available in Powerlog Audio Routing via app')
if version.parse(iOSversion) >= version.parse("10"):
cursor.execute('''
select
datetime(timestamp, 'unixepoch'),
bulletinbundleid,
timeinterval / 60,
count,
posttype
from
plspringboardagent_aggregate_sbbulletins_aggregate
''')
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
for row in all_rows:
data_list2.append((row[0],row[1],row[2],row[3],row[4]))
else:
logfunc('No data available in Aggregate Bulletins')
if version.parse(iOSversion) >= version.parse("10"):
cursor.execute('''
select
datetime(timestamp, 'unixepoch'),
notificationbundleid,
count as "count",
notificationtype
from
plspringboardagent_aggregate_sbnotifications_aggregate
''')
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
for row in all_rows:
data_list3.append((row[0],row[1],row[2],row[3]))
else:
logfunc('No data available in Aggregate Notifications')
if version.parse(iOSversion) >= version.parse("9"):
cursor.execute('''
select
datetime(timestamp, 'unixepoch'),
appname,
appexecutable,
appbundleid,
appbuildversion,
appbundleversion,
apptype,
case appdeleteddate
when 0 then "not deleted"
else datetime(appdeleteddate, 'unixepoch')
end
from
plapplicationagent_eventnone_allapps
''')
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
for row in all_rows:
data_list5.append((row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7]))
else:
logfunc('No data available in Powerlog App Info')
if version.parse(iOSversion) >= version.parse("11"):
cursor.execute('''
select
datetime(timestamp, 'unixepoch'),
datetime(start, 'unixepoch'),
datetime(end, 'unixepoch'),
state,
finished,
haserror
from
plxpcagent_eventpoint_mobilebackupevents
''')
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
for row in all_rows:
data_list6.append((row[0],row[1],row[2],row[3],row[4],row[5]))
else:
logfunc('No data available in Powerlog Backup Info')
if version.parse(iOSversion) >= version.parse("11"):
cursor.execute("""
select
datetime(appdeleteddate, 'unixepoch'),
datetime(timestamp, 'unixepoch'),
appname,
appexecutable,
appbundleid,
appbuildversion,
appbundleversion,
apptype
from
plapplicationagent_eventnone_allapps
where
appdeleteddate > 0 """)
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
for row in all_rows:
data_list7.append((row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7]))
if version.parse(iOSversion) == version.parse("10"):
cursor.execute("""
select
datetime(appdeleteddate, 'unixepoch'),
datetime(timestamp, 'unixepoch'),
appname,
appexecutable,
appbundleid,
appbuildversion,
appbundleversion
from
plapplicationagent_eventnone_allapps
where
appdeleteddate > 0
""")
        all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
for row in all_rows:
data_list7.append((row[0],row[1],row[2],row[3],row[4],row[5],row[6]))
if version.parse(iOSversion) == version.parse("9"):
cursor.execute("""
select
datetime(appdeleteddate, 'unixepoch'),
datetime(timestamp, 'unixepoch'),
appname,
appbundleid
from
plapplicationagent_eventnone_allapps
where
appdeleteddate > 0
""")
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
for row in all_rows:
data_list7.append((row[0],row[1],row[2],row[3]))
if version.parse(iOSversion) >= version.parse("10"):
cursor.execute('''
select
datetime(timestamp, 'unixepoch'),
build,
device,
hwmodel,
pairingid
from
plconfigagent_eventnone_paireddeviceconfig
''')
else:
cursor.execute('''
select
datetime(timestamp, 'unixepoch'),
build,
device
from
plconfigagent_eventnone_paireddeviceconfig
''')
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
if version.parse(iOSversion) >= version.parse("10"):
for row in all_rows:
data_list11.append((row[0],row[1],row[2],row[3],row[4]))
else:
for row in all_rows:
data_list11.append((row[0],row[1],row[2]))
if version.parse(iOSversion) >= version.parse("9"):
cursor.execute('''
select
datetime(tts + system, 'unixepoch'),
bundleid,
case level
when "0" then "off"
when "1" then "on"
end as status,
datetime(tts, 'unixepoch'),
datetime(tot, 'unixepoch'),
system
from
(
select
bundleid,
torchid,
tts,
tot,
max(toid),
system,
level
from
(
select
plcameraagent_eventforward_torch.timestamp as tts,
plcameraagent_eventforward_torch.bundleid,
plcameraagent_eventforward_torch.level,
plcameraagent_eventforward_torch.id as "torchid",
plstorageoperator_eventforward_timeoffset.timestamp as tot,
plstorageoperator_eventforward_timeoffset.id as toid,
plstorageoperator_eventforward_timeoffset.system,
bundleid
from
plcameraagent_eventforward_torch
left join
plstorageoperator_eventforward_timeoffset
)
as torchest
group by
torchid
)
''')
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
for row in all_rows:
data_list15.append((row[0],row[1],row[2],row[3],row[4],row[5]))
if version.parse(iOSversion) >= version.parse("9"):
cursor.execute('''
select
datetime(wifipropts + system, 'unixepoch') ,
currentssid,
currentchannel,
datetime(tot, 'unixepoch') ,
system as time_offset
from
(
select
wifiorotsid,
wifipropts,
tot,
max(toi),
currentssid,
currentchannel,
system
from
(
select
plwifiagent_eventbackward_cumulativeproperties.timestamp as wifipropts,
currentssid,
currentchannel,
plwifiagent_eventbackward_cumulativeproperties.id as "wifiorotsid" ,
plstorageoperator_eventforward_timeoffset.timestamp as tot,
plstorageoperator_eventforward_timeoffset.id as toi,
plstorageoperator_eventforward_timeoffset.system
from
plwifiagent_eventbackward_cumulativeproperties
left join
plstorageoperator_eventforward_timeoffset
)
as wifipropst
group by
wifiorotsid
)
''')
all_rows = cursor.fetchall()
usageentries = len(all_rows)
if usageentries > 0:
for row in all_rows:
data_list18.append((row[0],row[1],row[2],row[3],row[4]))
if version.parse(iOSversion) >= version.parse("9"):
if len(data_list1) > 0:
report = ArtifactHtmlReport('Powerlog Audio Routing via App')
report.start_artifact_report(report_folder, 'Audio Routing')
report.add_script()
            data_headers1 = ('Timestamp','Timestamp Logged','Application Name','Assertion ID','Assertion Name','Audio Route','Mirroring State','Operation','PID')
report.write_artifact_data_table(data_headers1, data_list1, file_found)
report.end_artifact_report()
tsvname = 'Powerlog Audio Routing via App from GZ backup'
tsv(report_folder, data_headers1, data_list1, tsvname)
tlactivity = 'Powerlog Audio Routing via App from GZ backup'
timeline(report_folder, tlactivity, data_list1, data_headers1)
if version.parse(iOSversion) >= version.parse("10"):
if len(data_list2) > 0:
report = ArtifactHtmlReport('Powerlog Aggregate Bulletins')
report.start_artifact_report(report_folder, 'Aggregate Bulletins')
report.add_script()
data_headers2 = ('Timestamp','Bulletin Bundle ID','Time Interval in Seconds','Count','Post Type')
report.write_artifact_data_table(data_headers2, data_list2, file_found)
report.end_artifact_report()
tsvname = 'Powerlog Agg Bulletins from GZ backup'
tsv(report_folder, data_headers2, data_list2, tsvname)
tlactivity = 'Powerlog Agg Bulletins from GZ backup'
timeline(report_folder, tlactivity, data_list2, data_headers2)
if version.parse(iOSversion) >= version.parse("10"):
if len(data_list3) > 0:
report = ArtifactHtmlReport('Powerlog Aggregate Notifications')
report.start_artifact_report(report_folder, 'Aggregate Notifications')
report.add_script()
data_headers3 = ('Timestamp','Notification Bundle ID','Count','Notification Type')
report.write_artifact_data_table(data_headers3, data_list3, file_found)
report.end_artifact_report()
tsvname = 'Powerlog Agg Notifications from GZ backup'
tsv(report_folder, data_headers3, data_list3, tsvname)
tlactivity = 'Powerlog Agg Notifications from GZ backup'
timeline(report_folder, tlactivity, data_list3, data_headers3)
if version.parse(iOSversion) >= version.parse("9"):
if len(data_list5) > 0:
report = ArtifactHtmlReport('Powerlog App Info')
report.start_artifact_report(report_folder, 'App Info')
report.add_script()
            data_headers5 = ('Timestamp','App Name','App Executable Name','Bundle ID','App Build Version','App Bundle Version','App Type','App Deleted Date')
report.write_artifact_data_table(data_headers5, data_list5, file_found)
report.end_artifact_report()
tsvname = 'Powerlog App Info from GZ backup'
tsv(report_folder, data_headers5, data_list5, tsvname)
tlactivity = 'Powerlog App Info from GZ backup'
timeline(report_folder, tlactivity, data_list5, data_headers5)
if version.parse(iOSversion) >= version.parse("11"):
if len(data_list6) > 0:
report = ArtifactHtmlReport('Powerlog Backup Info')
report.start_artifact_report(report_folder, 'Backup Info')
report.add_script()
data_headers6 = ('Timestamp','Start','End','State','Finished','Has error' )
report.write_artifact_data_table(data_headers6, data_list6, file_found)
report.end_artifact_report()
tsvname = 'Powerlog Backup Info from GZ backup'
tsv(report_folder, data_headers6, data_list6, tsvname)
tlactivity = 'Powerlog Backup Info from GZ backup'
timeline(report_folder, tlactivity, data_list6, data_headers6)
if version.parse(iOSversion) >= version.parse("11"):
if len(data_list7) > 0:
report = ArtifactHtmlReport('Powerlog Deleted Apps')
report.start_artifact_report(report_folder, 'Deleted Apps')
report.add_script()
data_headers7 = ('App Deleted Date','Timestamp','App Name','App Executable Name','Bundle ID','App Build Version','App Bundle Version','App Type')
report.write_artifact_data_table(data_headers7, data_list7, file_found)
report.end_artifact_report()
tsvname = 'Powerlog Deleted Apps from GZ backup'
tsv(report_folder, data_headers7, data_list7, tsvname)
tlactivity = 'Powerlog Deleted Apps from GZ backup'
timeline(report_folder, tlactivity, data_list7, data_headers7)
if version.parse(iOSversion) == version.parse("10"):
if len(data_list7) > 0:
report = ArtifactHtmlReport('Powerlog Deleted Apps')
report.start_artifact_report(report_folder, 'Deleted Apps')
report.add_script()
data_headers7 = ('App Deleted Date','Timestamp','App Name','App Executable Name','Bundle ID','App Build Version','App Bundle Version')
report.write_artifact_data_table(data_headers7, data_list7, file_found)
report.end_artifact_report()
tsvname = 'Powerlog Deleted Apps from GZ backup'
tsv(report_folder, data_headers7, data_list7, tsvname)
tlactivity = 'Powerlog Deleted Apps from GZ backup'
timeline(report_folder, tlactivity, data_list7, data_headers7)
if version.parse(iOSversion) == version.parse("9"):
if len(data_list7) > 0:
report = ArtifactHtmlReport('Powerlog Deleted Apps')
report.start_artifact_report(report_folder, 'Deleted Apps')
report.add_script()
data_headers7 = ('App Deleted Date','Timestamp','App Name','Bundle ID')
report.write_artifact_data_table(data_headers7, data_list7, file_found)
report.end_artifact_report()
tsvname = 'Powerlog Deleted Apps from GZ backup'
tsv(report_folder, data_headers7, data_list7, tsvname)
tlactivity = 'Powerlog Deleted Apps from GZ backup'
timeline(report_folder, tlactivity, data_list7, data_headers7)
if version.parse(iOSversion) >= version.parse("10"):
if len(data_list11) > 0:
report = ArtifactHtmlReport('Powerlog Paired Device Configuration')
report.start_artifact_report(report_folder, 'Paired Device Configuration')
report.add_script()
data_headers11 = ('Timestamp','Build','Device','HW Model','Pairing ID')
report.write_artifact_data_table(data_headers11, data_list11, file_found)
report.end_artifact_report()
tsvname = 'Powerlog Paired Device Conf from GZ backup'
tsv(report_folder, data_headers11, data_list11, tsvname)
tlactivity = 'Powerlog Paired Device Configuration from GZ backup'
timeline(report_folder, tlactivity, data_list11, data_headers11)
else:
if len(data_list11) > 0:
report = ArtifactHtmlReport('Powerlog Paired Device Configuration')
report.start_artifact_report(report_folder, 'Paired Device Configuration')
report.add_script()
data_headers11 = ('Timestamp','Build','Device' )
report.write_artifact_data_table(data_headers11, data_list11, file_found)
report.end_artifact_report()
tsvname = 'Powerlog Paired Device Conf from GZ backup'
tsv(report_folder, data_headers11, data_list11, tsvname)
tlactivity = 'Powerlog Paired Device Configuration from GZ backup'
timeline(report_folder, tlactivity, data_list11, data_headers11)
if version.parse(iOSversion) >= version.parse("9"):
if len(data_list15) > 0:
report = ArtifactHtmlReport('Powerlog Torch')
report.start_artifact_report(report_folder, 'Torch')
report.add_script()
data_headers15 = ('Adjusted Timestamp','Bundle ID','Status','Original Torch Timestamp','Offset Timestamp','Time Offset')
report.write_artifact_data_table(data_headers15, data_list15, file_found)
report.end_artifact_report()
tsvname = 'Powerlog Torch from GZ backup'
tsv(report_folder, data_headers15, data_list15, tsvname)
tlactivity = 'Powerlog Torch from GZ backup'
timeline(report_folder, tlactivity, data_list15, data_headers15)
if version.parse(iOSversion) >= version.parse("9"):
if len(data_list18) > 0:
report = ArtifactHtmlReport('Powerlog WiFi Network Connections')
report.start_artifact_report(report_folder, 'WiFi Network Connections')
report.add_script()
data_headers18 = ('Adjusted Timestamp','Current SSID','Current Channel','Offset Timestamp','Time Offset')
report.write_artifact_data_table(data_headers18, data_list18, file_found)
report.end_artifact_report()
tsvname = 'Powerlog Wifi Network Connections from GZ backup'
tsv(report_folder, data_headers18, data_list18, tsvname)
tlactivity = 'Powerlog Wifi Network Connections from GZ backup'
timeline(report_folder, tlactivity, data_list18, data_headers18)
| 32.534483
| 158
| 0.711653
|
0bc277fbc08e131a42fccbc4b12394fd98250999
| 177
|
py
|
Python
|
conftest.py
|
jbittel/django-multimedia
|
4ddd5e6d9f4f680e2f4f68cc3616ced8f0fc2a43
|
[
"BSD-3-Clause"
] | 19
|
2015-01-28T08:40:20.000Z
|
2021-12-18T11:55:58.000Z
|
conftest.py
|
jbittel/django-multimedia
|
4ddd5e6d9f4f680e2f4f68cc3616ced8f0fc2a43
|
[
"BSD-3-Clause"
] | 2
|
2015-02-09T17:03:24.000Z
|
2015-04-22T17:57:45.000Z
|
conftest.py
|
jbittel/django-multimedia
|
4ddd5e6d9f4f680e2f4f68cc3616ced8f0fc2a43
|
[
"BSD-3-Clause"
] | 4
|
2015-02-02T14:05:08.000Z
|
2016-09-14T00:44:55.000Z
|
import os
from django.conf import settings
def pytest_configure():
if not settings.configured:
os.environ['DJANGO_SETTINGS_MODULE'] = 'multimedia.tests.settings'
| 19.666667
| 74
| 0.745763
|
8d3ec2c684e9d42d64acae2e20c08ca54aa8fccf
| 5,600
|
py
|
Python
|
unpythonic/tests/test_lispylet.py
|
Technologicat/unpythonic
|
7d99d50c5d1c3151fb822491167a7008024d97a5
|
[
"BSD-2-Clause"
] | 55
|
2018-07-26T15:52:22.000Z
|
2022-01-28T00:53:59.000Z
|
unpythonic/tests/test_lispylet.py
|
Technologicat/unpythonic
|
7d99d50c5d1c3151fb822491167a7008024d97a5
|
[
"BSD-2-Clause"
] | 85
|
2019-07-24T11:02:45.000Z
|
2022-01-31T22:00:23.000Z
|
unpythonic/tests/test_lispylet.py
|
Technologicat/unpythonic
|
7d99d50c5d1c3151fb822491167a7008024d97a5
|
[
"BSD-2-Clause"
] | 2
|
2020-09-04T21:39:19.000Z
|
2022-01-12T03:41:50.000Z
|
# -*- coding: utf-8 -*-
from ..syntax import macros, test, test_raises # noqa: F401
from ..test.fixtures import session, testset
from functools import partial
from ..lispylet import let, letrec, dlet, dletrec, blet, bletrec
from ..seq import begin
def runtests():
with testset("basic usage"):
x = let((('a', 1),
('b', 2)),
lambda e: e.a + e.b)
test[x == 3]
x = letrec((('a', 1),
('b', lambda e:
e.a + 2)), # hence, b = 3
lambda e:
e.a + e.b)
test[x == 4]
# mutually recursive functions
t = letrec((('evenp', lambda e:
lambda x:
(x == 0) or e.oddp(x - 1)),
('oddp', lambda e:
lambda x:
(x != 0) and e.evenp(x - 1))),
lambda e:
e.evenp(42))
test[t is True]
f = lambda lst: letrec((("seen", set()),
("see", lambda e:
lambda x:
begin(e.seen.add(x),
x))),
lambda e:
[e.see(x) for x in lst if x not in e.seen])
L = [1, 1, 3, 1, 3, 2, 3, 2, 2, 2, 4, 4, 1, 2, 3]
test[f(L) == [1, 3, 2, 4]]
# Callable values always need a surrounding "lambda e: ...".
with testset("additional examples with callable values"):
test[letrec((('a', 2),
('f', lambda e:
lambda x: # callable, needs the surrounding "lambda e: ...", even though it doesn't use e.
42 * x)),
lambda e:
e.a * e.f(1)) == 84]
square = lambda x: x**2
test[letrec((('a', 2),
('f', lambda e: square)), # callable, needs "lambda e: ..."
lambda e:
e.a * e.f(10)) == 200]
def mul(x, y):
return x * y
test[letrec((('a', 2),
('f', lambda e: mul)), # "mul" is a callable
lambda e:
e.a * e.f(3, 4)) == 24]
double = partial(mul, 2)
test[letrec((('a', 2),
('f', lambda e: double)), # "double" is a callable
lambda e:
e.a * e.f(3)) == 12]
class TimesA:
def __init__(self, a):
self.a = a
def __call__(self, x):
return self.a * x
times5 = TimesA(5)
test[letrec((('a', 2),
('f', lambda e: times5)), # "times5" is a callable
lambda e:
e.a * e.f(3)) == 30]
with testset("let over lambda"):
lc = let((('count', 0),),
lambda e:
lambda: begin(e.set('count', e.count + 1),
e.count))
lc()
lc()
test[lc() == 3]
with testset("let over def"):
@dlet((('x', 17),))
def foo(*, env):
return env.x
test[foo() == 17]
@dletrec((('x', 2),
('y', lambda e: e.x + 3)))
def bar(a, *, env):
return a + env.y
test[bar(10) == 15]
@dlet((('count', 0),))
def counter(*, env):
env.count += 1
return env.count
counter()
counter()
test[counter() == 3]
with testset("let block"):
@blet((('x', 9001),))
def over9000(*, env):
return env.x
test[over9000 == 9001]
@bletrec((('evenp', lambda e:
lambda x:
(x == 0) or e.oddp(x - 1)),
('oddp', lambda e:
lambda x:
(x != 0) and e.evenp(x - 1)),))
def result(*, env):
return env.evenp(42)
test[result is True]
with testset("error cases"):
test_raises[AttributeError,
letrec((('a', lambda e:
e.b + 1), # error, e.b does not exist yet (simple value refers to binding below it)
('b', 42)),
lambda e:
e.a)]
test_raises[AttributeError,
let((('x', 0),),
lambda e:
e.set('y', 3)),
"e.y should not be defined"]
with test_raises[AttributeError, "let environment should be final (should not be able to create new bindings in it inside the let body)"]:
@blet((('x', 1),))
def error1(*, env):
env.y = 2 # error, cannot introduce new bindings into a let environment
test_raises[TypeError, let((), "not a callable")]
test_raises[TypeError, let((), lambda: None)] # body callable must be able to take in environment
test_raises[AttributeError, let((('x', 1),
('x', 2)), lambda e: e.x)] # trying to reassign same name
test_raises[TypeError, letrec((('x', lambda: 1),), lambda e: e.x)] # callable value must be able to take in environment
if __name__ == '__main__': # pragma: no cover
with session(__file__):
runtests()
| 35.220126
| 146
| 0.392679
|
718ccb944448b1b47b2251896d6c608af7787bc7
| 1,190
|
py
|
Python
|
files/nginx.facts.py
|
Turgon37/ansible-nginx
|
bb3e5d6a18546b09f61e45b5066161e4683c7733
|
[
"MIT"
] | null | null | null |
files/nginx.facts.py
|
Turgon37/ansible-nginx
|
bb3e5d6a18546b09f61e45b5066161e4683c7733
|
[
"MIT"
] | null | null | null |
files/nginx.facts.py
|
Turgon37/ansible-nginx
|
bb3e5d6a18546b09f61e45b5066161e4683c7733
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import json
import re
import subprocess
import sys
content = dict()
version_re = re.compile(r'nginx/(?P<version>(?P<major>[0-9]+)\.(?P<branch>[0-9]+)\.[0-9]+)')
stdout = None
try:
result = subprocess.Popen(['/usr/bin/env', 'nginx', '-v'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
(stdout, stderr) = result.communicate()
except subprocess.CalledProcessError as e:
content['error'] = str(e)
if stdout is not None:
match = version_re.search(stderr + stdout)
if match:
content['version_full'] = match.group('version')
content['version_major'] = match.group('major')
content['version_branch'] = match.group('branch')
if 'version_branch' in content:
try:
version_branch = int(content['version_branch'])
if version_branch % 2 == 0:
content['branch'] = 'stable'
else:
content['branch'] = 'mainline'
except ValueError:
pass
if len(content) == 0:
content = None
print(json.dumps(content))
sys.exit(0)
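# Example output (illustrative): for an installed nginx/1.18.0 this prints
# {"version_full": "1.18.0", "version_major": "1", "version_branch": "18", "branch": "stable"}
# since an even version_branch maps to "stable" and an odd one to "mainline".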
| 27.045455
| 91
| 0.570588
|
409e61fe8b01e55f20af6c15b7fa510ecf89bb20
| 641
|
py
|
Python
|
day25.py
|
p88h/aoc2017
|
a929a8c0894559b0d7dd3d0b58c076295087f4c8
|
[
"Unlicense"
] | 1
|
2021-12-26T21:28:47.000Z
|
2021-12-26T21:28:47.000Z
|
day25.py
|
p88h/aoc2017
|
a929a8c0894559b0d7dd3d0b58c076295087f4c8
|
[
"Unlicense"
] | null | null | null |
day25.py
|
p88h/aoc2017
|
a929a8c0894559b0d7dd3d0b58c076295087f4c8
|
[
"Unlicense"
] | null | null | null |
import io
lines = io.open("day25.in").read().splitlines()
state = lines[0][15]
steps = int(lines[1].split()[5])
program = {}
curs = 0
tape = {}
def parse_op(stmt):
val = int(stmt[0][22])
dir = 1 if 'right' in stmt[1] else -1
nxt = stmt[2][26]
return (val, dir, nxt)
for blk in range(len(lines) // 10):
label = lines[blk*10+3][9]
program[label] = [ parse_op(lines[blk*10+5:blk*10+8]), parse_op(lines[blk*10+9:blk*10+12]) ]
for s in range(steps):
value = tape[curs] if curs in tape else 0
(value, dir, state) = program[state][value]
tape[curs] = value
curs += dir
print(list(tape.values()).count(1))
| 25.64
| 96
| 0.606864
|
00e5fe1a58ef056c96202320d14c085552f15c71
| 4,284
|
py
|
Python
|
openshift/test/test_v1_cluster_resource_quota_selector.py
|
flaper87/openshift-restclient-python
|
13d5d86ca89035b9f596032e7a34f3cc33bf8f18
|
[
"Apache-2.0"
] | null | null | null |
openshift/test/test_v1_cluster_resource_quota_selector.py
|
flaper87/openshift-restclient-python
|
13d5d86ca89035b9f596032e7a34f3cc33bf8f18
|
[
"Apache-2.0"
] | null | null | null |
openshift/test/test_v1_cluster_resource_quota_selector.py
|
flaper87/openshift-restclient-python
|
13d5d86ca89035b9f596032e7a34f3cc33bf8f18
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
OpenShift API (with Kubernetes)
OpenShift provides builds, application lifecycle, image content management, and administrative policy on top of Kubernetes. The API allows consistent management of those objects. All API operations are authenticated via an Authorization bearer token that is provided for service accounts as a generated secret (in JWT form) or via the native OAuth endpoint located at /oauth/authorize. Core infrastructure components may use openshift.client certificates that require no authentication. All API operations return a 'resourceVersion' string that represents the version of the object in the underlying storage. The standard LIST operation performs a snapshot read of the underlying objects, returning a resourceVersion representing a consistent version of the listed objects. The WATCH operation allows all updates to a set of objects after the provided resourceVersion to be observed by a openshift.client. By listing and beginning a watch from the returned resourceVersion, openshift.clients may observe a consistent view of the state of one or more objects. Note that WATCH always returns the update after the provided resourceVersion. Watch may be extended a limited time in the past - using etcd 2 the watch window is 1000 events (which on a large cluster may only be a few tens of seconds) so openshift.clients must explicitly handle the \"watch to old error\" by re-listing. Objects are divided into two rough categories - those that have a lifecycle and must reflect the state of the cluster, and those that have no state. Objects with lifecycle typically have three main sections: * 'metadata' common to all objects * a 'spec' that represents the desired state * a 'status' that represents how much of the desired state is reflected on the cluster at the current time Objects that have no state have 'metadata' but may lack a 'spec' or 'status' section. Objects are divided into those that are namespace scoped (only exist inside of a namespace) and those that are cluster scoped (exist outside of a namespace). A namespace scoped resource will be deleted when the namespace is deleted and cannot be created if the namespace has not yet been created or is in the process of deletion. Cluster scoped resources are typically only accessible to admins - resources like nodes, persistent volumes, and cluster policy. All objects have a schema that is a combination of the 'kind' and 'apiVersion' fields. This schema is additive only for any given version - no backwards incompatible changes are allowed without incrementing the apiVersion. The server will return and accept a number of standard responses that share a common schema - for instance, the common error type is 'unversioned.Status' (described below) and will be returned on any error from the API server. The API is available in multiple serialization formats - the default is JSON (Accept: application/json and Content-Type: application/json) but openshift.clients may also use YAML (application/yaml) or the native Protobuf schema (application/vnd.kubernetes.protobuf). Note that the format of the WATCH API call is slightly different - for JSON it returns newline delimited objects while for Protobuf it returns length-delimited frames (4 bytes in network-order) that contain a 'versioned.Watch' Protobuf object. See the OpenShift documentation at https://docs.openshift.org for more information.
OpenAPI spec version: v3.6.0-alpha.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import openshift.client
from kubernetes.client.rest import ApiException
from openshift.client.models.v1_cluster_resource_quota_selector import V1ClusterResourceQuotaSelector
class TestV1ClusterResourceQuotaSelector(unittest.TestCase):
""" V1ClusterResourceQuotaSelector unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1ClusterResourceQuotaSelector(self):
"""
Test V1ClusterResourceQuotaSelector
"""
model = openshift.client.models.v1_cluster_resource_quota_selector.V1ClusterResourceQuotaSelector()
if __name__ == '__main__':
unittest.main()
| 99.627907
| 3,380
| 0.794818
|
c8076782efa55c054dacb71f7dac4093480cbdc5
| 1,019
|
py
|
Python
|
src/xgboost_distribution/distributions/base.py
|
thomasaarholt/xgboost-distribution
|
8ee00f7f0dcaadcb345ebcb15534287081aa987b
|
[
"MIT"
] | null | null | null |
src/xgboost_distribution/distributions/base.py
|
thomasaarholt/xgboost-distribution
|
8ee00f7f0dcaadcb345ebcb15534287081aa987b
|
[
"MIT"
] | null | null | null |
src/xgboost_distribution/distributions/base.py
|
thomasaarholt/xgboost-distribution
|
8ee00f7f0dcaadcb345ebcb15534287081aa987b
|
[
"MIT"
] | null | null | null |
"""Distribution base class
"""
from abc import ABC, abstractmethod
from collections import namedtuple
class BaseDistribution(ABC):
"""Base class distribution for XGBDistribution.
Note that distributions are stateless, hence a distribution is just a collection of
functions that operate on the data (`y`) and the outputs of the xgboost (`params`).
"""
def __init__(self):
self.Predictions = namedtuple("Predictions", (p for p in self.params))
# attach to globals to make pickling of namedtuple work
globals()[self.Predictions.__name__] = self.Predictions
def check_target(self, y):
pass
@property
@abstractmethod
def params(self):
pass
@abstractmethod
def starting_params(self, y):
pass
@abstractmethod
def gradient_and_hessian(self, y, params, natural_gradient=True):
pass
@abstractmethod
def loss(self, y, params):
pass
@abstractmethod
def predict(self, params):
pass
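
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal concrete
# subclass, assuming a normal distribution parameterised per sample by
# (loc, log_scale). The class name, parameterisation and the simplified
# gradient/Hessian below are assumptions made only to show how the abstract
# interface above fits together; the real library's distributions may differ.
import numpy as np


class SketchNormal(BaseDistribution):
    """Toy normal distribution predicting (loc, log_scale) for each sample."""

    @property
    def params(self):
        return ("loc", "log_scale")

    def starting_params(self, y):
        return np.mean(y), np.log(np.std(y))

    def gradient_and_hessian(self, y, params, natural_gradient=True):
        loc, log_scale = params[:, 0], params[:, 1]
        var = np.exp(2 * log_scale)
        # Gradient of the negative log-likelihood w.r.t. (loc, log_scale)
        grad = np.stack([(loc - y) / var, 1.0 - ((y - loc) ** 2) / var], axis=1)
        # Diagonal Fisher information, used here as a simple Hessian proxy
        fisher = np.stack([1.0 / var, 2.0 * np.ones_like(var)], axis=1)
        if natural_gradient:
            grad = grad / fisher
            fisher = np.ones_like(fisher)
        return grad, fisher

    def loss(self, y, params):
        loc, log_scale = params[:, 0], params[:, 1]
        var = np.exp(2 * log_scale)
        # Mean negative log-likelihood of y under N(loc, var)
        return np.mean(0.5 * np.log(2 * np.pi * var) + (y - loc) ** 2 / (2 * var))

    def predict(self, params):
        loc, log_scale = params[:, 0], params[:, 1]
        return self.Predictions(loc=loc, log_scale=log_scale)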
| 24.261905
| 87
| 0.671246
|
520fb62e620e06929db16f63c783a0ef42413d8e
| 4,739
|
py
|
Python
|
test/python/createForestDatafile.py
|
hpcc-systems/LearningTrees
|
386e8f84ca102d9f87a002c05032830ebd4b8c90
|
[
"Apache-2.0"
] | 2
|
2017-12-11T09:55:35.000Z
|
2017-12-20T18:37:21.000Z
|
test/python/createForestDatafile.py
|
hpcc-systems/LearningTrees
|
386e8f84ca102d9f87a002c05032830ebd4b8c90
|
[
"Apache-2.0"
] | 5
|
2017-11-16T09:41:52.000Z
|
2018-10-04T18:18:56.000Z
|
test/python/createForestDatafile.py
|
hpcc-systems/LearningTrees
|
386e8f84ca102d9f87a002c05032830ebd4b8c90
|
[
"Apache-2.0"
] | 2
|
2017-06-23T13:35:05.000Z
|
2022-01-01T12:11:37.000Z
|
# Build an ecl version of the Cover Type Database:
# http://archive.ics.uci.edu/ml/datasets/Covertype
# for testing purposes.
# To run: python createForestDatafile.py > ..\datasets\CovTypeDS.ecl
import sklearn.datasets
trainSamples = 1000
testSamples = 500
covtype = sklearn.datasets.fetch_covtype()
data = covtype.data
target = covtype.target
columnsIn = ['Elevation', 'Aspect', 'Slope', 'Hyd_DistH', 'Hyd_DistV','Road_Dist',
'Hillshade_9', 'Hillshade_12', 'Hillshade_15', 'Fire_Dist',
'areaIsRawah', 'areaIsNeota', 'areaIsComanche', 'areaIsPoudre',
'soil1', 'soil2', 'soil3', 'soil4', 'soil5', 'soil6', 'soil7',
'soil8', 'soil9', 'soil10', 'soil11', 'soil12', 'soil13',
'soil14', 'soil15', 'soil16', 'soil17', 'soil18', 'soil19',
'soil20', 'soil21', 'soil22', 'soil23',
'soil24', 'soil25', 'soil26', 'soil27', 'soil28', 'soil29',
'soil30', 'soil31', 'soil32', 'soil33',
'soil34', 'soil35', 'soil36', 'soil37', 'soil38', 'soil39',
'soil40', 'covType']
# Convert area one-hot (binary) columns to Nominal to exercise nominal features
columnsOut = ['id', 'Elevation', 'Aspect', 'Slope', 'Hyd_DistH', 'Hyd_DistV', 'Road_Dist',
'Hillshade_9', 'Hillshade_12', 'Hillshade_15', 'Fire_Dist',
'areaId',
'soil1', 'soil2', 'soil3', 'soil4', 'soil5', 'soil6', 'soil7',
'soil8', 'soil9', 'soil10', 'soil11', 'soil12', 'soil13',
'soil14', 'soil15', 'soil16', 'soil17', 'soil18', 'soil19',
'soil20', 'soil21', 'soil22', 'soil23',
'soil24', 'soil25', 'soil26', 'soil27', 'soil28', 'soil29',
'soil30', 'soil31', 'soil32', 'soil33',
'soil34', 'soil35', 'soil36', 'soil37', 'soil38', 'soil39',
'soil40', 'covType']
areaIds = {1:'Rawah', 2:'Neota', 3:'Comanche', 4:'Poudre'}
covTypes = {1:'Spruce/Fir', 2:'Lodgepole', 3:'Ponderosa', 4:'CottonWillow',
5:'Aspen', 6:'DougFir', 7:'Bristlecone'}
excludeTypes = []
nominalColumns = [11, 52] # Indexes in columnsOut
listdata = []
datarows = data.shape[0]
for i in range(datarows):
if target[i] in excludeTypes:
continue
row = list(data[i]) + [target[i]]
listdata.append(row)
# Sample testing and training data at once (without replacement)
# to avoid any overlap
rows = trainSamples + testSamples
cols = len(columnsIn)
allRecs = sklearn.utils.resample(listdata, replace = False, n_samples = rows,
random_state = 314159)
# Convert area binaries to nominal
allRecsC = []
for i in range(rows):
areaId = 0
row = allRecs[i]
for j in range(10,14):
if row[j] > 0:
areaId = j-10+1
outRec = [(i+1)] + list(row[:10]) + [areaId] + list(row[14:])
allRecsC.append(outRec)
trainRecs = allRecsC[:trainSamples]
testRecs = allRecsC[trainSamples:]
outLines = []
outLines.append('EXPORT covTypeDS := MODULE')
outLine = ' EXPORT covTypeRec := RECORD'
outLines.append(outLine)
for i in range(len(columnsOut)):
field = columnsOut[i]
if i == 0:
outLine = ' UNSIGNED ' + field + ';'
else:
outLine = ' REAL ' + field + ';'
outLines.append(outLine)
outLine = ' END;'
outLines.append(outLine)
outLines.append('')
outLine = ' EXPORT numCols := ' + str(len(columnsOut)-1) + ';'
outLines.append(outLine)
outLine = ' EXPORT SET OF UNSIGNED nominalCols := ['
colList = []
for col in nominalColumns:
colList.append(str(col))
colString = ', '.join(colList)
outLine += colString + '];'
outLines.append(outLine)
outLines.append('')
outLine = ' EXPORT trainRecs := DATASET(['
outLines.append(outLine)
outRecs = []
for rec in trainRecs:
strRec = []
for field in rec:
strRec.append(str(field))
outRec = ' {' + ','.join(strRec) + '}'
outRecs.append(outRec)
outLines.append(',\n'.join(outRecs) + ']\n , covTypeRec);')
outLines.append('')
outLine = ' EXPORT testRecs := DATASET(['
outLines.append(outLine)
outRecs = []
for rec in testRecs:
strRec = []
for field in rec:
strRec.append(str(field))
outRec = ' {' + ','.join(strRec) + '}'
outRecs.append(outRec)
outLines.append(',\n'.join(outRecs) + ']\n , covTypeRec);')
outLines.append('')
outLines.append('END;')
header = """// This file is automatically generated by test/python/createForestDatafile.py
// It is an ecl extract of the CoverType database from scikit-learn.
// Each record represents the cartographic data for one plot of Rocky Mountain forest, including
// the primary tree cover for each plot.
// See: http://archive.ics.uci.edu/ml/datasets/Covertype for details.
"""
outStr = header + '\n'.join(outLines)
print(outStr)
| 34.34058
| 96
| 0.618485
|
c43d26d22339caef8366ae2bd3c4b4bef2f3c398
| 453
|
py
|
Python
|
data/scripts/templates/object/tangible/wearables/skirt/shared_skirt_s11.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/tangible/wearables/skirt/shared_skirt_s11.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/tangible/wearables/skirt/shared_skirt_s11.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/wearables/skirt/shared_skirt_s11.iff"
result.attribute_template_id = 11
result.stfName("wearables_name","skirt_s11")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 26.647059
| 73
| 0.730684
|
ba3f4177034512de79905dbcb167395b0ff8e593
| 1,015
|
py
|
Python
|
app/app/urls.py
|
godomainz/recipe-app-api
|
ac3315ee44974c75de0c0dde95ae701549c3a8ae
|
[
"MIT"
] | null | null | null |
app/app/urls.py
|
godomainz/recipe-app-api
|
ac3315ee44974c75de0c0dde95ae701549c3a8ae
|
[
"MIT"
] | 7
|
2021-03-30T13:53:36.000Z
|
2022-01-13T03:15:17.000Z
|
app/app/urls.py
|
godomainz/recipe-app-api
|
ac3315ee44974c75de0c0dde95ae701549c3a8ae
|
[
"MIT"
] | null | null | null |
"""app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('api/user/', include('user.urls')),
path('api/recipe/', include('recipe.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 37.592593
| 78
| 0.697537
|
306eb717fcb12e5a68cc2966f5b6a82fc062c208
| 3,105
|
py
|
Python
|
tabnine-vim/third_party/ycmd/third_party/bottle/plugins/werkzeug/bottle_werkzeug.py
|
MrMonk3y/vimrc
|
950230fb3fd7991d1234c2ab516ec03245945677
|
[
"MIT"
] | 239
|
2018-04-20T06:58:32.000Z
|
2022-03-22T18:06:08.000Z
|
plugins/werkzeug/bottle_werkzeug.py
|
alisaifee/bottle
|
2d38ec9b52cc0e8445b6e38e07c8b5a2711335fb
|
[
"MIT"
] | 10
|
2018-12-09T13:49:06.000Z
|
2021-07-03T00:38:53.000Z
|
plugins/werkzeug/bottle_werkzeug.py
|
alisaifee/bottle
|
2d38ec9b52cc0e8445b6e38e07c8b5a2711335fb
|
[
"MIT"
] | 99
|
2018-07-20T09:16:13.000Z
|
2022-03-20T11:58:56.000Z
|
"""
This plugin adds support for :class:`werkzeug.Response`, all kinds of
:exc:`werkzeug.exceptions` and provides a thread-local instance of
:class:`werkzeug.Request`. It basically turns Bottle into Flask.
The plugin instance doubles as a werkzeug module object, so you don't need to
import werkzeug in your application.
For werkzeug library documentation, see: http://werkzeug.pocoo.org/
Example::
import bottle
app = bottle.Bottle()
werkzeug = bottle.ext.werkzeug.Plugin()
app.install(werkzeug)
    req = werkzeug.request # For the lazy.
@app.route('/hello/:name')
def say_hello(name):
greet = {'en':'Hello', 'de':'Hallo', 'fr':'Bonjour'}
language = req.accept_languages.best_match(greet.keys())
if language:
return werkzeug.Response('%s %s!' % (greet[language], name))
else:
raise werkzeug.exceptions.NotAcceptable()
"""
__author__ = "Marcel Hellkamp"
__version__ = '0.1'
__license__ = 'MIT'
### CUT HERE (see setup.py)
import werkzeug
from werkzeug import *
import bottle
class WerkzeugDebugger(DebuggedApplication):
""" A subclass of :class:`werkzeug.debug.DebuggedApplication` that obeys the
:data:`bottle.DEBUG` setting. """
def __call__(self, environ, start_response):
if bottle.DEBUG:
return DebuggedApplication.__call__(self, environ, start_response)
return self.app(environ, start_response)
class WerkzeugPlugin(object):
""" This plugin adds support for :class:`werkzeug.Response`, all kinds of
:module:`werkzeug.exceptions` and provides a thread-local instance of
:class:`werkzeug.Request`. It basically turns Bottle into Flask. """
name = 'werkzeug'
api = 2
def __init__(self, evalex=False, request_class=werkzeug.Request,
debugger_class=WerkzeugDebugger):
self.request_class = request_class
self.debugger_class = debugger_class
self.evalex=evalex
self.app = None
def setup(self, app):
self.app = app
if self.debugger_class:
app.wsgi = self.debugger_class(app.wsgi, evalex=self.evalex)
app.catchall = False
def apply(self, callback, route):
def wrapper(*a, **ka):
environ = bottle.request.environ
            bottle.local.werkzeug_request = self.request_class(environ)
try:
rv = callback(*a, **ka)
            except werkzeug.exceptions.HTTPException as e:
rv = e.get_response(environ)
if isinstance(rv, werkzeug.BaseResponse):
rv = bottle.HTTPResponse(rv.iter_encoded(), rv.status_code, rv.header_list)
return rv
return wrapper
@property
def request(self):
''' Return a local proxy to the current :class:`werkzeug.Request`
instance.'''
        return werkzeug.LocalProxy(lambda: bottle.local.werkzeug_request)
def __getattr__(self, name):
''' Convenient access to werkzeug module contents. '''
return getattr(werkzeug, name)
Plugin = WerkzeugPlugin
| 31.05
| 91
| 0.652496
|
895e5aa9f5f569c946e5743174fa5c0336a90f3b
| 61,360
|
py
|
Python
|
metalibm_core/core/ml_formats.py
|
metalibm/metalibm-clone
|
d04839e58950a156b79b763b9f45cb874e21ebfe
|
[
"MIT"
] | null | null | null |
metalibm_core/core/ml_formats.py
|
metalibm/metalibm-clone
|
d04839e58950a156b79b763b9f45cb874e21ebfe
|
[
"MIT"
] | null | null | null |
metalibm_core/core/ml_formats.py
|
metalibm/metalibm-clone
|
d04839e58950a156b79b763b9f45cb874e21ebfe
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
## @package ml_formats
# Metalibm Formats node precision
###############################################################################
# This file is part of metalibm (https://github.com/kalray/metalibm)
###############################################################################
# MIT License
#
# Copyright (c) 2018 Kalray
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
###############################################################################
# created: Dec 23rd, 2013
# last-modified: Mar 8th, 2018
#
# author(s): Nicolas Brunie (nicolas.brunie@kalray.eu)
###############################################################################
import operator
import re
import sollya
from metalibm_core.utility.log_report import Log
from metalibm_core.code_generation.code_constant import *
from metalibm_core.core.special_values import (
FP_SpecialValue, FP_PlusInfty, FP_MinusInfty, FP_QNaN, FP_SNaN,
FP_PlusZero, FP_MinusZero, NumericValue
)
from metalibm_core.core.display_utils import (
DisplayFormat,
fixed_point_beautify, multi_word_fixed_point_beautify,
DISPLAY_DD, DISPLAY_TD, DISPLAY_DS, DISPLAY_TS
)
S2 = sollya.SollyaObject(2)
## \defgroup ml_formats ml_formats
# @{
# numerical floating-point constants
ml_nan = sollya.parse("nan")
ml_infty = sollya.parse("infty")
def get_sollya_from_long(v):
result = sollya.SollyaObject(0)
power = sollya.SollyaObject(1)
base = S2**16
while v:
v, r = divmod(v, int(base))
result += int(r) * power
power *= base
return result
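# Illustrative usage (not in the original source): the chunked conversion is
# exact even for integers wider than a double, e.g. get_sollya_from_long(2**70 + 3)
# rebuilds exactly 2**70 + 3 as a SollyaObject from 16-bit pieces.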
## Ancestor class for Metalibm's format classes
class ML_Format(object):
""" parent to every Metalibm's format class """
def __init__(self, name = None, display_format = None):
self.name = {} if name is None else name
self.display_format = {} if display_format is None else display_format
## return format name
def get_name(self, language = C_Code):
if language in self.name:
return self.name[language]
elif C_Code in self.name:
return self.name[C_Code]
else:
return "undefined format"
def __repr__(self):
return self.get_code_name()
## return source code name for the format
def get_code_name(self, language=None):
return self.get_name(language)
def get_match_format(self):
return self
def get_base_format(self):
return self
def get_support_format(self):
return self
def get_display_format(self, language):
if language in self.display_format:
return self.display_format[language]
elif C_Code in self.display_format:
return self.display_format[C_Code]
else:
return "ERROR_FORMAT"
## return the format's bit-size
def get_bit_size(self):
""" <abstract> return the bit size of the format (if it exists) """
print(self) # Exception ML_NotImplemented print
raise NotImplementedError
def is_cst_decl_required(self):
return False
## return the C code for args initialization
def generate_initialization(self, *args, **kwords):
return None
def get_value_from_integer_coding(self, value, base=10):
""" convert integer value to self's format value
assuming value is the canonical encoding
if base is None, value is assumed to be a number
else value is assumed to be a str """
raise NotImplementedError
def get_integer_coding(self, value):
raise NotImplementedError
def saturate(self, value):
""" Return value if it fits in self format range, else
the closest format bound """
raise NotImplementedError
## return the C code for value assignation to var
# @param var variable assigned
# @param value value being assigned
# @param final boolean flag indicating if this assignation is the last in an assignation list
def generate_assignation(self, var, value, final = True, language = C_Code):
final_symbol = ";\n" if final else ""
return "%s = %s" % (var, value)
# return the format maximal value
def get_max_value(self):
raise NotImplementedError
def is_vector_format(self):
return False
## class for floating-point exception
class ML_FloatingPointException: pass
## class for type of floating-point exceptions
class ML_FloatingPointException_Type(ML_Format):
## dummy placeholder to generate C constant for FP exception (should raise error)
def get_cst(self, value, language = C_Code):
return "NONE"
def is_cst_decl_required(self):
return False
def get_match_format(self):
return self
## ML object for floating-point exception type
ML_FPE_Type = ML_FloatingPointException_Type()
## ML object for floating-point underflow exception
ML_FPE_Underflow = ML_FloatingPointException()
## ML object for floating-point overflow exception
ML_FPE_Overflow = ML_FloatingPointException()
## ML object for floating-point inexact exception
ML_FPE_Inexact = ML_FloatingPointException()
## ML object for floating-point invalid exception
ML_FPE_Invalid = ML_FloatingPointException()
## ML object for floating-point divide by zero exception
ML_FPE_DivideByZero = ML_FloatingPointException()
class FormatAttributeWrapper(ML_Format):
""" Format attribute wrapper: decorators for format object
extend a base format with custom attributes
      WARNING: the decorated format will not compare equal to the original
(undecorated) format """
def __init__(self, base_format, attribute_list):
self.base_format = base_format
self.attribute_list = attribute_list
def get_base_format(self):
return self.base_format.get_base_format()
def get_support_format(self):
return self.base_format.get_support_format()
def get_match_format(self):
return self.base_format.get_match_format()
def is_vector_format(self):
return self.base_format.is_vector_format()
def get_bit_size(self):
return self.base_format.get_bit_size()
def get_display_format(self, language = C_Code):
return self.base_format.get_display_format(language)
def get_name(self, language = C_Code):
str_list = self.attribute_list + [self.base_format.get_name(language = language)]
return " ".join(str_list)
def __str__(self):
return self.get_name()
## Class of rounding mode type
class ML_FloatingPoint_RoundingMode_Type(ML_Format):
name_map = {None: "ml_rnd_mode_t", C_Code: "ml_rnd_mode_t", OpenCL_Code: "ml_rnd_mode_t"}
def get_c_name(self):
return "ml_rnd_mode_t"
def get_name(self, language = C_Code):
return ML_FloatingPoint_RoundingMode_Type.name_map[language]
## Class of floating-point rounding mode
class ML_FloatingPoint_RoundingMode(object):
pass
## ML type object for rounding modes
ML_FPRM_Type = ML_FloatingPoint_RoundingMode_Type()
## ML object for rounding to nearest mode
ML_RoundToNearest = ML_FloatingPoint_RoundingMode()
## ML object for rounding toward zero mode
ML_RoundTowardZero = ML_FloatingPoint_RoundingMode()
## ML object for rounding toward plus infinity mode
ML_RoundTowardPlusInfty = ML_FloatingPoint_RoundingMode()
## ML object for rounding toward minus infinity mode
ML_RoundTowardMinusInfty = ML_FloatingPoint_RoundingMode()
## ML object for current global rounding mode
ML_GlobalRoundMode = ML_FloatingPoint_RoundingMode()
## Ancestor class for abstract format
class ML_AbstractFormat(ML_Format):
def __init__(self, c_name):
ML_Format.__init__(self)
self.name[C_Code] = c_name
def __str__(self):
return self.name[C_Code]
## return the gappa constant corresponding to parameter
# @param cst_value constant value being translated
def get_gappa_cst(self, cst_value):
""" C code for constante cst_value """
old_display = sollya.settings.display
sollya.settings.display = sollya.hexadecimal
if isinstance(cst_value, int):
result = str(float(cst_value))
else:
result = str(cst_value)
sollya.settings.display = old_display
return result
def is_cst_decl_required(self):
return False
def is_abstract_format(ml_format):
return isinstance(ml_format, ML_AbstractFormat)
## ML object for exact format (no rounding involved)
ML_Exact = ML_AbstractFormat("ML_Exact")
## Ancestor class for instanciated formats
class ML_InstanciatedFormat(ML_Format): pass
## Ancestor class for all Floating-point formats
class ML_FP_Format(ML_Format):
""" parent to every Metalibm's floating-point class """
pass
@staticmethod
def is_fp_format(precision):
""" generic predicate to test whether or not precision
is a floating-point format """
return isinstance(precision, ML_FP_Format)
## Ancestor class for standard (as defined in IEEE-754) floating-point formats
class ML_Std_FP_Format(ML_FP_Format):
""" standard floating-point format base class """
def __init__(self, bit_size, exponent_size, field_size, c_suffix, c_name, ml_support_prefix, c_display_format, sollya_object, union_field_suffix = None):
ML_Format.__init__(self)
self.name[C_Code] = c_name
self.display_format[C_Code] = c_display_format
self.bit_size = bit_size
self.exponent_size = exponent_size
self.field_size = field_size
self.c_suffix = c_suffix
self.ml_support_prefix = ml_support_prefix
self.sollya_object = sollya_object
## suffix used when selecting format in a support library union
self.union_field_suffix = union_field_suffix
def get_ml_support_prefix(self):
return self.ml_support_prefix
def get_union_field_suffix(self):
return self.union_field_suffix
## return the sollya object encoding the format precision
def get_sollya_object(self):
return self.sollya_object
## round the sollya object @p value to the sollya precision
# equivalent to @p self
def round_sollya_object(self, value, round_mode = sollya.RN):
return sollya.round(value, self.get_sollya_object(), round_mode)
def __str__(self):
return self.name[C_Code]
def get_name(self, language = C_Code):
return self.name[C_Code]
def get_bias(self):
return - 2**(self.get_exponent_size() - 1) + 1
def get_emax(self):
return 2**self.get_exponent_size() - 2 + self.get_bias()
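  # Worked values (illustrative): with exponent_size = 8 (binary32) the bias is
  # -2**7 + 1 = -127 and emax = 2**8 - 2 - 127 = 127; with exponent_size = 11
  # (binary64) the bias is -1023 and emax = 1023.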
def get_special_value_coding(self, sv, language = C_Code):
""" Generate integer coding for a special value number
in self format """
assert FP_SpecialValue.is_special_value(sv)
return sv.get_integer_coding()
def saturate(self, value):
if abs(value) > self.get_max_value():
      return FP_PlusInfty(self) if value > 0 else FP_MinusInfty(self)
return value
## return the integer coding of @p value
# @param value numeric value to be converted
# @return value encoding (as an integer number)
def get_integer_coding(self, value, language = C_Code):
if FP_SpecialValue.is_special_value(value):
return self.get_special_value_coding(value, language)
elif value == ml_infty:
return self.get_special_value_coding(FP_PlusInfty(self), language)
elif value == -ml_infty:
return self.get_special_value_coding(FP_MinusInfty(self), language)
elif value != value:
# sollya NaN
return self.get_special_value_coding(FP_QNaN(self), language)
else:
pre_value = value
value = sollya.round(value, self.get_sollya_object(), sollya.RN)
# FIXME: managing negative zero
sign = int(1 if value < 0 else 0)
value = abs(value)
if value == 0.0:
Log.report(Log.Warning, "+0.0 forced during get_integer_coding conversion")
exp_biased = 0
mant = 0
else:
try:
exp = int(sollya.floor(sollya.log2(value)))
except ValueError as e:
Log.report(Log.Error, "unable to compute int(sollya.floor(sollya.log2({}))), pre_value={}", value, pre_value, error=e)
exp_biased = int(exp - self.get_bias())
if exp < self.get_emin_normal():
exp_biased = 0
mant = int((value / S2**self.get_emin_subnormal()))
else:
mant = int((value / S2**exp - 1.0) * (S2**self.get_field_size()))
return mant | (exp_biased << self.get_field_size()) | (sign << (self.get_field_size() + self.get_exponent_size()))
def get_value_from_integer_coding(self, value, base=10):
""" Convert a value binary encoded following IEEE-754 standard
to its floating-point numerical (or special) counterpart """
value = int(value, base)
exponent_field = ((value >> self.get_field_size()) & (2**self.get_exponent_size() - 1))
is_subnormal = (exponent_field == 0)
mantissa = value & (2**self.get_field_size() - 1)
sign_bit = value >> (self.get_field_size() + self.get_exponent_size())
if exponent_field == self.get_nanorinf_exp_field():
if mantissa == 0 and sign_bit:
return FP_MinusInfty(self)
elif mantissa == 0 and not(sign_bit):
return FP_PlusInfty(self)
else:
# NaN value
quiet_bit = mantissa >> (self.get_field_size() - 1)
if quiet_bit:
return FP_QNaN(self)
else:
return FP_SNaN(self)
elif exponent_field == 0 and mantissa == 0:
if sign_bit:
return FP_MinusZero(self)
else:
return FP_PlusZero(self)
else:
assert exponent_field != self.get_nanorinf_exp_field()
exponent = exponent_field + self.get_bias() + (1 if is_subnormal else 0)
sign = -1.0 if sign_bit != 0 else 1.0
mantissa_value = mantissa
implicit_digit = 0.0 if is_subnormal else 1.0
return NumericValue(sign * S2**int(exponent) * (implicit_digit + mantissa_value * S2**-self.get_field_size()))
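  # Round-trip example (illustrative): ML_Binary32.get_integer_coding(1.0)
  # returns 0x3f800000, get_value_from_integer_coding("3f800000", base=16)
  # recovers 1.0, and -2.0 encodes to 0xc0000000.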
# @return<SollyaObject> the format omega value, the maximal normal value
def get_omega(self):
return S2**self.get_emax() * (2 - S2**-self.get_field_size())
# @return<SollyaObject> the format maximal value
def get_max_value(self):
return self.get_omega()
def get_min_normal_value(self):
""" return the minimal normal number in @p self format """
return S2**self.get_emin_normal()
## return the exponent field corresponding to
# a special value (inf or NaN)
def get_nanorinf_exp_field(self):
return S2**self.get_exponent_size() - 1
## Return the minimal exponent for a normal number
def get_emin_normal(self):
return 1 + self.get_bias()
## Return the minimal exponent for a subnormal number
def get_emin_subnormal(self):
return 1 - (self.get_field_size()) + self.get_bias()
## Return the display (for debug message) associated
# to format @p self
def get_display_format(self, language = C_Code):
return self.display_format[language]
def get_bit_size(self):
""" return the format bit size """
return self.bit_size
def get_zero_exponent_value(self):
return 0
def get_special_exponent_value(self):
return 2**self.get_exponent_size() - 1
def get_exponent_size(self):
return self.exponent_size
def get_exponent_interval(self):
low_bound = self.get_emin_normal()
high_bound = self.get_emax()
return sollya.Interval(low_bound, high_bound)
## return the size of the mantissa bitfield (excluding implicit bit(s))
def get_field_size(self):
return self.field_size
## Return the complete mantissa size (including implicit bit(s))
def get_mantissa_size(self):
return self.field_size + 1
def get_cst(self, cst_value, language = C_Code):
"""Return how a constant of value cst_value should be written in the
language language for this meta-format.
"""
if language is C_Code:
return self.get_c_cst(cst_value)
elif language is Gappa_Code:
return self.get_gappa_cst(cst_value)
else:
# default case
return self.get_c_cst(cst_value)
def get_c_cst(self, cst_value):
""" C code for constant cst_value """
if isinstance(cst_value, FP_SpecialValue):
return cst_value.get_c_cst()
else:
old_display = sollya.settings.display
sollya.settings.display = sollya.hexadecimal
if cst_value == sollya.SollyaObject(0):
conv_result = "0.0" + self.c_suffix
if cst_value == ml_infty:
conv_result = "INFINITY"
elif cst_value == ml_nan:
conv_result = "NAN"
elif isinstance(cst_value, int):
conv_result = str(float(cst_value)) + self.c_suffix
else:
if isinstance(cst_value, sollya.SollyaObject):
conv_result = str(self.round_sollya_object(cst_value)) + self.c_suffix
else:
conv_result = str(cst_value) + self.c_suffix
if conv_result == "0f":
conv_result = "0.0f"
sollya.settings.display = old_display
return conv_result
def get_precision(self):
""" return the bit-size of the mantissa """
return self.get_field_size()
def get_gappa_cst(self, cst_value):
""" C code for constante cst_value """
if isinstance(cst_value, FP_SpecialValue):
return cst_value.get_gappa_cst()
else:
old_display = sollya.settings.display
sollya.settings.display = sollya.hexadecimal
if isinstance(cst_value, int):
result = str(float(cst_value))
else:
result = str(cst_value)
sollya.settings.display = old_display
return result
def get_integer_format(self):
""" Return a signed integer format whose size matches @p self format
(useful for casts) """
int_precision = {
ML_Binary16: ML_Int16,
ML_Binary32: ML_Int32,
ML_Binary64: ML_Int64,
ML_Binary80: None,
BFloat16: ML_Int16,
}
return int_precision[self]
def get_unsigned_integer_format(self):
""" Return an unsigned integer format whose size matches @p self format
(useful for casts) """
uint_precision = {
ML_Binary16: ML_UInt16,
ML_Binary32: ML_UInt32,
ML_Binary64: ML_UInt64,
BFloat16: ML_UInt16,
ML_Binary80: None,
}
return uint_precision[self]
def is_std_float(precision):
return isinstance(precision, ML_Std_FP_Format)
## Generic constructor for Metalibm formats
class ML_FormatConstructor(ML_Format):
""" Generic constructor for Metalibm formats """
## Object constructor
# @param bit_size size of the format (in bits)
# @param c_name name of the format in the C language
# @param c_display_format string format to display @p self format value
# @param get_c_cst function self, value -> Node to generate
# constant value associated with @p self format
def __init__(self, bit_size, c_name, c_display_format, get_c_cst):
ML_Format.__init__(self)
self.bit_size = bit_size
self.name[C_Code] = c_name
self.display_format[C_Code] = c_display_format
self.get_cst_map = {C_Code: get_c_cst}
## generate a constant value with numerical value @p value
# in language @p language
def get_cst(self, value, language = C_Code):
return self.get_cst_map[language](self, value)
def __str__(self):
return self.name[C_Code]
## Return the format size (in bits)
def get_bit_size(self):
return self.bit_size
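# Usage sketch (illustrative, hypothetical format): a 24-bit opaque format whose
# C constants would be emitted as plain integers; ML_Bool further below is built
# through the same constructor.
# My24 = ML_FormatConstructor(24, "my24_t", DisplayFormat("%d"),
#                             lambda self, value: str(int(value)))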
## a virtual format is a format which is internal to Metalibm
#  representation and relies on another non-virtual format
# for support in generated code
class VirtualFormat(ML_Format):
def __init__(self,
base_format = None,
support_format = None,
get_cst = lambda self, value, language:
self.base_format.get_cst(value, language),
cst_decl_required = False
):
ML_Format.__init__(self)
self.support_format = support_format
self.base_format = base_format
self.internal_get_cst = get_cst
# is constant declaration required
self.cst_decl_required = cst_decl_required
def get_cst(self, cst_value, language = C_Code):
return self.internal_get_cst(self, cst_value, language)
def __str__(self):
return "{}/{}".format(str(self.base_format), self.support_format)
## return name for the format
def get_name(self, language = C_Code):
#raise NotImplementedError
return self.support_format.get_name(language = language)
## return source code name for the format
def get_code_name(self, language=None):
code_name = self.support_format.get_name(language = language)
return code_name
def set_support_format(self, _format):
self.support_format = _format
def get_match_format(self):
return self.base_format
def get_base_format(self):
return self.base_format
def get_support_format(self):
return self.support_format
def get_signed(self):
return self.get_base_format().get_signed()
def get_bit_size(self):
return self.get_base_format().get_bit_size()
def is_cst_decl_required(self):
return self.cst_decl_required
def is_vector_format(self):
return False
def round_sollya_object(self, value, round_mode=sollya.RN):
return self.get_base_format().round_sollya_object(value, round_mode)
def get_display_format(self, language):
if self is self.get_base_format():
print(self)
raise Exception()
return self.get_base_format().get_display_format(language)
def get_virtual_cst(prec, value, language):
""" constant get for virtual format """
return prec.get_support_format().get_cst(
prec.get_base_format().get_integer_coding(value, language)
)
## Virtual format with no match forwarding
class VirtualFormatNoForward(VirtualFormat):
def get_match_format(self):
return self
class VirtualFormatNoBase(VirtualFormat):
""" Virtual format class which does not point towards a distinct
base format """
def get_match_format(self):
return self
def get_base_format(self):
return self
def get_vector_format(self):
return False
## Ancestor to fixed-point format
class ML_Fixed_Format(ML_Format):
""" parent to every Metalibm's fixed-point class """
def __init__(self, align = 0):
ML_Format.__init__(self)
# self.support_format must be an integer format
# used to contain the fixed-point value
# offset between the support LSB and the actual value LSB
self.support_right_align = align
def get_match_format(self):
return self
def get_base_format(self):
return self
def get_name(self, language = C_Code):
return ML_Format.get_name(self, language = language)
def get_code_name(self, language = C_Code):
return ML_Format.get_code_name(self, language = language)
def set_support_right_align(self, align):
self.support_right_align = align
def get_support_right_align(self):
return self.support_right_align
class ML_Base_FixedPoint_Format(ML_Fixed_Format, VirtualFormatNoBase):
""" base class for standard integer format """
def __init__(self, integer_size, frac_size, signed = True, support_format = None, align = 0):
""" standard fixed-point format object initialization function """
ML_Fixed_Format.__init__(self, align)
VirtualFormatNoBase.__init__(self, support_format = support_format)
self.integer_size = integer_size
self.frac_size = frac_size
self.signed = signed
## @return size (in bits) of the integer part of @p self formats
  #  may be negative to indicate a right shift of the fractional
# part
def get_integer_size(self):
return self.integer_size
def get_c_bit_size(self):
return self.c_bit_size
def get_integer_coding(self, value):
# FIXME: managed c_bit_size & sign properly
return value * 2**self.frac_size
def get_value_from_integer_coding(self, value, base=10):
# FIXME: managed c_bit_size & sign properly
if not base is None:
value = int(value, base)
return value * 2**-self.frac_size
@staticmethod
def match(format_str):
""" returns None if format_str does not match the class pattern
or a re.match if it does """
return re.match("(?P<name>F[US])(?P<integer>-?[\d]+)\.(?P<frac>-?[\d]+)",format_str)
## @return size (in bits) of the fractional part of
# @p self formats
# may be negative to indicate a left shift of the integer part
def get_frac_size(self):
return self.frac_size
def get_precision(self):
""" return the number of digits after the point """
return self.frac_size
## @return boolean signed/unsigned property
def get_signed(self):
return self.signed
## return the maximal possible value for the format
def get_max_value(self):
offset = -1 if self.get_signed() else 0
max_code_exp = self.get_integer_size() + self.get_frac_size()
code_value = S2**(max_code_exp + offset) - 1
return code_value * S2**-self.get_frac_size()
## @p round the numerical value @p value to
# @p self fixed-point format while applying
# @p round_mode to determine rounding direction
# @return rounded value (SollyaObject)
def round_sollya_object(self, value, round_mode=sollya.RN):
rnd_function = {
sollya.RN: sollya.nearestint,
sollya.RD: sollya.floor,
sollya.RU: sollya.ceil,
sollya.RZ: lambda x: sollya.floor(x) if x > 0 \
else sollya.ceil(x)
}[round_mode]
scale_factor = S2**self.get_frac_size()
return rnd_function(scale_factor * value) / scale_factor
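  # Worked example (illustrative): in an FS4.4 format (frac_size = 4), rounding
  # 1.3 to nearest scales it to 20.8, rounds to 21 and descales to 21/16 = 1.3125.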
## return the minimal possible value for the format
def get_min_value(self):
if not self.get_signed():
return 0
else:
max_code_exp = self.get_integer_size() + self.get_frac_size()
code_value = S2**(max_code_exp - 1)
return - (code_value * S2**-self.get_frac_size())
  ## if value exceeds the format's range, wrap it around (two's-complement truncation)
def truncate(self, value):
descaled_value = value * S2**self.get_frac_size()
masked_value = int(descaled_value) & int(S2**self.get_bit_size() - 1)
scaled_value = masked_value * S2**-self.get_frac_size()
if scaled_value > self.get_max_value():
scaled_value -= S2**self.get_integer_size()
return scaled_value
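  # Worked example (illustrative): in FS4.4, truncate(8.5) keeps the low 8 bits
  # of the scaled value (136), which descales back to 8.5; since that exceeds the
  # maximum 7.9375, 2**4 is subtracted and the value wraps to -7.5.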
def __str__(self):
if self.signed:
return "FS%d.%d" % (self.integer_size, self.frac_size)
else:
return "FU%d.%d" % (self.integer_size, self.frac_size)
def get_bit_size(self):
return self.integer_size + self.frac_size
def get_cst(self, cst_value, language = C_Code):
if language is C_Code:
return self.get_c_cst(cst_value)
elif language is Gappa_Code:
return self.get_gappa_cst(cst_value)
else:
return self.get_c_cst(cst_value)
def saturate(self, value):
""" Saturate value to stay within:
[self.get_min_value(), self.get_max_value()] """
return min(max(value, self.get_min_value()), self.get_max_value())
def get_integer_coding(self, value, language=C_Code):
if value > self.get_max_value() or value < self.get_min_value():
Log.report(Log.Error, "value {} is out of format {} range [{}; {}]", value, self, self.get_min_value(), self.get_max_value())
if value < 0:
if not self.signed:
Log.report(Log.Error, "negative value encountered {} while converting for an unsigned precision: {}".format(value, self))
encoded_value = (~int(abs(value) * S2**self.frac_size) + 1) % 2**self.get_bit_size()
return encoded_value
else:
encoded_value = int(value * S2**self.frac_size)
return encoded_value
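  # Worked example (illustrative): in FS4.4, +1.5 encodes to 24 (0x18) and
  # -1.5 to its two's complement 232 (0xe8) modulo 2**8.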
def get_c_cst(self, cst_value):
""" C-language constant generation """
try:
encoded_value = int(cst_value * S2**self.frac_size)
except (ValueError, TypeError) as e:
print(e, cst_value, self.frac_size)
Log.report(Log.Error, "Error during constant conversion to sollya object from format {}", str(self))
if self.c_bit_size in [8, 16, 32, 64]:
return ("" if self.signed else "U") + "INT" + str(self.c_bit_size) + "_C(" + str(encoded_value) + ")"
elif self.c_bit_size == 128:
return self.get_128b_c_cst(cst_value)
else:
Log.report(Log.Error, "Error unsupported format {} with c_bit_size={} in get_c_cst", str(self), self.c_bit_size)
def get_128b_c_cst(self, cst_value):
""" specific get_cst function for 128-bit ML_Standard_FixedPoint_Format
:param self: fixed-point format object
:type self: ML_Standard_FixedPoint_Format
:param cst_value: numerical constant value
:type cst_value: SollyaObject
:return: string encoding of 128-bit constant
:rtype: str
"""
try:
encoded_value = int(cst_value * S2**self.frac_size)
lo_value = encoded_value % 2**64
assert lo_value >= 0
hi_value = encoded_value >> 64
except (ValueError, TypeError) as e:
print(e, cst_value, self.frac_size)
Log.report(Log.Error, "Error during constant conversion to sollya object from format {}", str(self))
return "(((({u}__int128) {hi_value}{postfix}) << 64) + ((unsigned __int128) {lo_value}ull))".format(
u="unsigned " if not self.signed else "",
postfix="ull" if not self.signed else "ll",
hi_value=str(hi_value),
lo_value=str(lo_value)
)
def get_gappa_cst(self, cst_value):
""" Gappa-language constant generation """
return str(cst_value)
class ML_Base_SW_FixedPoint_Format(ML_Base_FixedPoint_Format):
""" Base Fixed-Point format for software implementation,
try to infer the required size of C-format to support
this format """
MAX_BIT_SIZE = 128
MIN_BIT_SIZE = 1
POSSIBLE_SIZES = [8, 16, 32, 64, 128]
# class initialized to allow proper format comparison
DISPLAY_FORMAT_MAP = {}
C_NAME_MAP = {
True: {
8: "int8_t",
16: "int16_t",
32: "int32_t",
64: "int64_t",
128: "__int128"
},
False: {
8: "uint8_t",
16: "uint16_t",
32: "uint32_t",
64: "uint64_t",
128: "unsigned __int128"
},
}
def __init__(self, integer_size, frac_size, signed=True, support_format=None, align=0):
# FIXME: align parameter is not used
ML_Base_FixedPoint_Format.__init__(
self,
integer_size,
frac_size,
signed,
support_format
)
    # guess the minimal bit_size required in the c representation
bit_size = integer_size + frac_size
if bit_size < self.MIN_BIT_SIZE or bit_size > self.MAX_BIT_SIZE:
Log.report(Log.Warning, "unsupported bit_size {} in ML_Base_SW_FixedPoint_Format".format(bit_size))
self.dbg_name = ("" if self.signed else "u") + "int" + str(bit_size)
else:
# for python2 compatibility min without default argument is used
possible_list = [n for n in self.POSSIBLE_SIZES if n >= bit_size]
if len(possible_list) == 0:
self.c_bit_size = None
else:
self.c_bit_size = min(possible_list)
if self.c_bit_size is None:
Log.report(Log.Error, "not able to find a compatible c_bit_size for {} = {} + {}", bit_size, integer_size, frac_size)
c_name = ML_Base_SW_FixedPoint_Format.C_NAME_MAP[self.signed][self.c_bit_size]
c_display_format = self.build_display_format_object()
self.name[C_Code] = c_name
self.display_format[C_Code] = c_display_format
self.dbg_name = c_name
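  # Worked example (illustrative): integer_size = 10 and frac_size = 6 give a
  # 16-bit value, so the smallest supporting C type is int16_t (uint16_t when
  # the format is unsigned).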
def build_display_format_object(self):
key = (self.integer_size, self.frac_size, self.support_format)
if not key in ML_Base_SW_FixedPoint_Format.DISPLAY_FORMAT_MAP:
if self.c_bit_size <= 64:
display_format = DisplayFormat(
format_string="%e/%\"PRI" + ("i" if self.signed else "u") + str(self.c_bit_size) + "\"",
pre_process_fct=fixed_point_beautify(self.frac_size)
)
else:
assert (self.c_bit_size % 64 == 0)
num_64b_chunk = int(self.c_bit_size / 64)
format_string = "%e / " + ("%\"PRIx64\"" * num_64b_chunk)
display_format = DisplayFormat(
format_string=format_string,
pre_process_fct=multi_word_fixed_point_beautify(num_64b_chunk, self.frac_size)
)
# using class map memoization to simplify type comparison
# by keeping a single display object for isomorphic type objects
ML_Base_SW_FixedPoint_Format.DISPLAY_FORMAT_MAP[key] = display_format
else:
display_format = ML_Base_SW_FixedPoint_Format.DISPLAY_FORMAT_MAP[key]
return display_format
def get_display_format(self, language=C_Code):
return self.display_format[language]
## Ancestor to standard (meaning integers) fixed-point format
class ML_Standard_FixedPoint_Format(ML_Base_SW_FixedPoint_Format):
def __init__(self, integer_size, frac_size, signed = True):
ML_Base_SW_FixedPoint_Format.__init__(self, integer_size, frac_size, signed = signed, support_format = self, align = 0)
## use 0 as the LSB weight to round in sollya
def get_sollya_object(self):
return sollya.SollyaObject(0)
## round the sollya object @p value to the sollya precision
# equivalent to @p self
def round_sollya_object(self, value, round_mode = sollya.RN):
# TBD: support other rounding mode
return sollya.nearestint(value)
def __repr__(self):
return self.dbg_name
def __str__(self):
return self.dbg_name
class ML_Custom_FixedPoint_Format(ML_Base_SW_FixedPoint_Format):
""" Custom fixed-point format class """
def __eq__(self, other):
""" equality predicate for custom fixed-point format object """
return (type(self) == type(other)) and (self.__dict__ == other.__dict__)
def __ne__(self, other):
""" unequality predicate for custom fixed-point format object """
return not (self == other)
def __hash__(self):
    return self.integer_size * self.frac_size * 2 + (1 if self.signed else 0)
@staticmethod
def parse_from_match(format_match):
""" Parse the description of a class format and generates
the format object """
assert not format_match is None
name = format_match.group("name")
integer_size = int(format_match.group("integer"))
frac_size = int(format_match.group("frac"))
is_signed = (name == "FS")
return ML_Custom_FixedPoint_Format(integer_size, frac_size, signed=is_signed)
## parse a string describing a ML_Custom_FixedPoint_Format object
# @param format_str string describing the format object
# @return the format instance converted from the string
@staticmethod
def parse_from_string(format_str):
format_match = ML_Custom_FixedPoint_Format.match(format_str)
return ML_Custom_FixedPoint_Format.parse_from_match(format_match)
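# Parsing sketch (illustrative): ML_Custom_FixedPoint_Format.parse_from_string("FS16.16")
# yields a signed 16.16 fixed-point format, 32 bits wide and backed by int32_t.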
# Standard binary floating-point format declarations
## IEEE binary32 (fp32) single precision floating-point format
ML_Binary32 = ML_Std_FP_Format(32, 8, 23, "f", "float", "fp32", DisplayFormat("%a"), sollya.binary32, union_field_suffix = "f")
## IEEE binary64 (fp64) double precision floating-point format
ML_Binary64 = ML_Std_FP_Format(64, 11, 52, "", "double", "fp64", DisplayFormat("%la"), sollya.binary64, union_field_suffix = "d")
ML_Binary80 = ML_Std_FP_Format(80, 15, 64, "L", "long double", "fp80", DisplayFormat("%la"), sollya.binary80)
## IEEE binary16 (fp16) half precision floating-point format
ML_Binary16 = ML_Std_FP_Format(16, 5, 10, "__ERROR__", "half", "fp16", DisplayFormat("%a"), sollya.binary16)
def round_to_bfloat16(value, round_mode=sollya.RN, flush_sub_to_zero=True):
""" round value to Brain Float16 format """
  # when given a number of digits, sollya assumes an infinite exponent range
rounded_value = sollya.round(value, 11, round_mode)
# we flush subnormal to 0.0
if abs(rounded_value) < ML_Binary32.get_min_normal_value():
if flush_sub_to_zero:
return FP_PlusZero(BFloat16_Base) if rounded_value >= 0 else FP_MinusZero(BFloat16_Base)
else:
raise NotImplementedError("bfloat16 rounding only support flushing subnormal to 0")
  # second rounding to binary32 ensures that overflows are detected properly
return sollya.round(rounded_value, sollya.binary32, round_mode)
def bfloat16_get_cst(cst_value, language):
return ML_UInt16.get_cst(ML_Binary32.get_integer_coding(cst_value) >> 16, language)
class BFloat16_Class(ML_Std_FP_Format):
def __init__(self, flush_sub_to_zero=True):
ML_Std_FP_Format.__init__(self, 16, 8, 7, "__ERROR__", "bfloat16", "bfloat16", DisplayFormat("%a"), sollya.SollyaObject(10))
self.flush_sub_to_zero = flush_sub_to_zero
def round_sollya_object(self, value, round_mode=sollya.RN):
return round_to_bfloat16(value, round_mode=round_mode, flush_sub_to_zero=self.flush_sub_to_zero)
BFloat16_Base = BFloat16_Class(flush_sub_to_zero=True)
# Standard integer format declarations
ML_Int8 = ML_Standard_FixedPoint_Format(8, 0, True)
ML_UInt8 = ML_Standard_FixedPoint_Format(8, 0, False)
ML_Int16 = ML_Standard_FixedPoint_Format(16, 0, True)
ML_UInt16 = ML_Standard_FixedPoint_Format(16, 0, False)
ML_Int32 = ML_Standard_FixedPoint_Format(32, 0, True)
ML_UInt32 = ML_Standard_FixedPoint_Format(32, 0, False)
ML_Int64 = ML_Standard_FixedPoint_Format(64, 0, True)
ML_UInt64 = ML_Standard_FixedPoint_Format(64, 0, False)
ML_Int128 = ML_Standard_FixedPoint_Format(128, 0, True)
ML_UInt128 = ML_Standard_FixedPoint_Format(128, 0, False)
ML_Int256 = ML_Standard_FixedPoint_Format(256, 0, True)
# Brain Float16 format
BFloat16 = VirtualFormatNoForward(
base_format=BFloat16_Base,
support_format=ML_UInt16,
get_cst=(lambda self, cst_value, language: bfloat16_get_cst(cst_value, language))
)
def bool_get_c_cst(self, cst_value):
if cst_value:
return "ML_TRUE"
else:
return "ML_FALSE"
class ML_Bool_Format(object):
""" abstract Boolean format """
pass
class ML_BoolClass(ML_FormatConstructor, ML_Bool_Format):
def __str__(self):
return "ML_Bool"
ML_Bool = ML_BoolClass(32, "int", DisplayFormat("%d"), bool_get_c_cst)
## virtual parent to string formats
class ML_String_Format(ML_Format):
""" abstract String format """
pass
class ML_StringClass(ML_String_Format):
""" Metalibm character string class """
def __init__(self, c_name, c_display_format, get_c_cst):
ML_Format.__init__(self)
self.name[C_Code] = c_name
self.display_format[C_Code] = c_display_format
self.get_cst_map = {C_Code: get_c_cst}
def get_cst(self, value, language = C_Code):
return self.get_cst_map[language](self, value)
def __str__(self):
return self.name[C_Code]
## Metalibm string format
ML_String = ML_StringClass("char*", DisplayFormat("%s"), lambda self, s: "\"{}\"".format(s))
## Predicate checking if @p precision is a standard integer format
def is_std_integer_format(precision):
return isinstance(precision, ML_Standard_FixedPoint_Format) or \
(isinstance(precision, ML_Format) and \
isinstance(precision.get_base_format(), ML_Standard_FixedPoint_Format) and \
not precision.is_vector_format())
#return precision in [ ML_Int8, ML_UInt8, ML_Int16, ML_UInt16,
# ML_Int32, ML_UInt32, ML_Int64, ML_UInt64,
# ML_Int128, ML_UInt128 ]
def is_std_signed_integer_format(precision):
return is_std_integer_format(precision) and \
(precision.get_base_format().get_signed() or \
precision.get_signed())
#return precision in [ ML_Int8, ML_Int16, ML_Int32, ML_Int64, ML_Int128 ]
def is_std_unsigned_integer_format(precision):
return is_std_integer_format(precision) and \
((not precision.get_base_format().get_signed()) or \
(not precision.get_signed()))
#return precision in [ ML_UInt8, ML_UInt16, ML_UInt32, ML_UInt64, ML_UInt128 ]
def is_table_index_format(precision):
""" Predicate to test if <precision> can be used as table index format """
return isinstance(precision, ML_Standard_FixedPoint_Format) or \
isinstance(precision.get_match_format(), ML_Standard_FixedPoint_Format) and \
not precision.is_vector_format()
def get_std_integer_support_format(precision):
""" return the ML's integer format to contains
the fixed-point format precision """
assert(isinstance(precision, ML_Fixed_Format))
format_map = {
# signed
True: {
8: ML_Int8,
16: ML_Int16,
32: ML_Int32,
64: ML_Int64,
128: ML_Int128,
},
# unsigned
False: {
8: ML_UInt8,
16: ML_UInt16,
32: ML_UInt32,
64: ML_UInt64,
128: ML_UInt128,
},
}
return format_map[precision.get_signed()][precision.get_c_bit_size()]
## functor for abstract format construction
def AbstractFormat_Builder(name, inheritance):
field_map = {
"name": name,
"__str__": lambda self: self.name[C_Code],
}
return type(name, (ML_AbstractFormat,) + inheritance, field_map)
class ML_IntegerClass(ML_AbstractFormat, ML_Fixed_Format): pass
class ML_FloatClass(ML_AbstractFormat, ML_FP_Format): pass
class ML_AbstractBoolClass(ML_AbstractFormat, ML_Bool_Format): pass
# abstract formats singleton
ML_Integer = ML_IntegerClass("ML_Integer") #AbstractFormat_Builder("ML_Integer", (ML_Fixed_Format,))("ML_Integer")
ML_Float = ML_FloatClass("ML_Float") #AbstractFormat_Builder("ML_Float", (ML_FP_Format,))("ML_Float")
ML_AbstractBool = ML_AbstractBoolClass("ML_AbstractBool")#AbstractFormat_Builder("MLAbstractBool", (ML_Bool_Format,))("ML_AbstractBool")
###############################################################################
# COMPOUND FORMAT
###############################################################################
class ML_Compound_Format(ML_Format):
def __init__(self, c_name, c_field_list, field_format_list, ml_support_prefix, c_display_format, sollya_object):
ML_Format.__init__(self)
self.name[C_Code] = c_name
self.display_format[C_Code] = c_display_format
self.ml_support_prefix = ml_support_prefix
self.sollya_object = sollya_object
self.c_field_list = c_field_list
self.field_format_list = field_format_list
def __str__(self):
return self.name[C_Code]
## return the sollya object encoding the format precision
def get_sollya_object(self):
return self.sollya_object
def round_sollya_object(self, value, round_mode=sollya.RN):
""" Round a numerical value encapsulated in a SollyaObject
to @p self format with rounding mode @p round_mode
@return SollyaObject """
return sollya.round(value, self.get_sollya_object(), round_mode)
## forces constant declaration during code generation
def is_cst_decl_required(self):
return True
def get_bit_size(self):
""" Return bit-size of the full compound format """
return sum([scalar.get_bit_size() for scalar in self.field_format_list])
def get_cst(self, cst_value, language = C_Code):
tmp_cst = cst_value
field_str_list = []
for field_name, field_format in zip(self.c_field_list, self.field_format_list):
            # FIXME: round is only valid for double_double or triple_double style formats
field_value = sollya.round(tmp_cst, field_format.sollya_object, sollya.RN)
tmp_cst = tmp_cst - field_value
field_str_list.append(".%s = %s" % (field_name, field_format.get_c_cst(field_value)))
return "{%s}" % (", ".join(field_str_list))
def get_gappa_cst(self, cst_value):
""" Constant generation in Gappa-language """
return str(cst_value)
class ML_Compound_FP_Format(ML_Compound_Format, ML_FP_Format):
pass
class ML_Compound_Integer_Format(ML_Compound_Format, ML_Fixed_Format):
pass
class ML_FP_MultiElementFormat(ML_Compound_FP_Format):
""" parent format for multi-precision format (single single,
double double, triple double ...) """
@staticmethod
def is_fp_multi_elt_format(format_object):
return isinstance(format_object, ML_FP_MultiElementFormat)
def get_bit_size(self):
return sum([scalar.get_bit_size() for scalar in self.field_format_list])
def get_limb_precision(self, limb_index):
return self.field_format_list[limb_index]
@property
def limb_num(self):
""" return the number of limbs of the multi-precision format """
return len(self.field_format_list)
# compound binary floating-point format declaration
ML_DoubleDouble = ML_FP_MultiElementFormat("ml_dd_t", ["hi", "lo"],
[ML_Binary64, ML_Binary64],
"", DISPLAY_DD,
sollya.doubledouble)
ML_TripleDouble = ML_FP_MultiElementFormat("ml_td_t", ["hi", "me", "lo"],
[ML_Binary64, ML_Binary64,
ML_Binary64],
"", DISPLAY_TD,
sollya.tripledouble)
ML_SingleSingle = ML_FP_MultiElementFormat("ml_ds_t", ["hi", "lo"],
[ML_Binary32, ML_Binary32],
"", DISPLAY_DS,
2*ML_Binary32.get_mantissa_size() + 1)
ML_TripleSingle = ML_FP_MultiElementFormat("ml_ts_t", ["hi", "me", "lo"],
[ML_Binary32, ML_Binary32, ML_Binary32],
"", DISPLAY_TS,
3*ML_Binary32.get_mantissa_size() + 1)
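# Illustrative sketch (added for clarity, not part of the original source):
# the multi-element formats expose their limb layout directly.
def _example_multi_element_layout():
    assert ML_DoubleDouble.limb_num == 2 and ML_TripleDouble.limb_num == 3
    assert ML_SingleSingle.get_limb_precision(0) is ML_Binary32
    assert ML_TripleDouble.get_limb_precision(2) is ML_Binary64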
###############################################################################
# VECTOR FORMAT
###############################################################################
## common ancestor to every vector format
class ML_VectorFormat(ML_Format):
def __init__(self, scalar_format, vector_size, c_name):
ML_Format.__init__(self, name = {C_Code: c_name})
self.scalar_format = scalar_format
self.vector_size = vector_size
def is_vector_format(self):
return True
def get_bit_size(self):
return self.vector_size * self.scalar_format.get_bit_size()
def __str__(self):
return self.get_code_name(language = C_Code)
def get_scalar_format(self):
return self.scalar_format
def set_scalar_format(self, new_scalar_format):
self.scalar_format = new_scalar_format
def get_vector_size(self):
return self.vector_size
def set_vector_size(self, new_vector_size):
self.vector_size = new_vector_size
def get_name(self, language = C_Code):
try:
return ML_Format.get_name(self, language)
except KeyError:
return self.get_scalar_format().get_name(language)
## Generic class for Metalibm support library vector format
class ML_CompoundVectorFormat(ML_VectorFormat, ML_Compound_Format):
def __init__(self, c_format_name, opencl_format_name, vector_size, scalar_format, sollya_precision = None, cst_callback = None):
ML_VectorFormat.__init__(self, scalar_format, vector_size, c_format_name)
ML_Compound_Format.__init__(self, c_format_name, ["_[%d]" % i for i in range(vector_size)], [scalar_format for i in range(vector_size)], "", "", sollya_precision)
# registering OpenCL-C format name
self.name[OpenCL_Code] = opencl_format_name
self.cst_callback = cst_callback
def get_cst_default(self, cst_value, language = C_Code):
elt_value_list = [self.scalar_format.get_cst(cst_value[i], language = language) for i in range(self.vector_size)]
if language is C_Code:
return "{._ = {%s}}" % (", ".join(elt_value_list))
elif language is OpenCL_Code:
return "(%s)(%s)" % (self.get_name(language = OpenCL_Code), (", ".join(elt_value_list)))
else:
Log.report(Log.Error, "unsupported language in ML_CompoundVectorFormat.get_cst: %s" % (language))
def get_cst(self, cst_value, language = C_Code):
if self.cst_callback is None:
return self.get_cst_default(cst_value, language)
else:
return self.cst_callback(self, cst_value, language)
class ML_IntegerVectorFormat(ML_CompoundVectorFormat, ML_Fixed_Format):
pass
class ML_FloatingPointVectorFormat(ML_CompoundVectorFormat, ML_FP_Format):
pass
class ML_MultiPrecision_VectorFormat(ML_CompoundVectorFormat):
@property
def limb_num(self):
return self.scalar_format.limb_num
def get_limb_precision(self, limb_index):
limb_format = self.scalar_format.get_limb_precision(limb_index)
return VECTOR_TYPE_MAP[limb_format][self.vector_size]
## helper function to generate a vector format
# @param format_name string name of the result format
# @param vector_size integer number of element in the vector
# @param scalar_format ML_Format object, format of a vector's element
# @param sollya_precision pythonsollya object, sollya precision to be used for computation
# @param compound_constructor ML_Compound_Format child class used to build the result format
# @param cst_callback function (self, value, language) -> str, used to generate constant value code
def vector_format_builder(c_format_name, opencl_format_name, vector_size,
scalar_format, sollya_precision=None,
compound_constructor=ML_FloatingPointVectorFormat, cst_callback=None):
return compound_constructor(c_format_name, opencl_format_name, vector_size,
scalar_format, sollya_precision, cst_callback)
v2float32 = vector_format_builder("ml_float2_t", "float2", 2, ML_Binary32)
v3float32 = vector_format_builder("ml_float3_t", "float3", 3, ML_Binary32)
v4float32 = vector_format_builder("ml_float4_t", "float4", 4, ML_Binary32)
v8float32 = vector_format_builder("ml_float8_t", "float8", 8, ML_Binary32)
v2float64 = vector_format_builder("ml_double2_t", "double2", 2, ML_Binary64)
v3float64 = vector_format_builder("ml_double3_t", "double3", 3, ML_Binary64)
v4float64 = vector_format_builder("ml_double4_t", "double4", 4, ML_Binary64)
v8float64 = vector_format_builder("ml_double8_t", "double8", 8, ML_Binary64)
v2bool = vector_format_builder("ml_bool2_t", "int2", 2, ML_Bool, compound_constructor = ML_IntegerVectorFormat)
v3bool = vector_format_builder("ml_bool3_t", "int3", 3, ML_Bool, compound_constructor = ML_IntegerVectorFormat)
v4bool = vector_format_builder("ml_bool4_t", "int4", 4, ML_Bool, compound_constructor = ML_IntegerVectorFormat)
v8bool = vector_format_builder("ml_bool8_t", "int8", 8, ML_Bool, compound_constructor = ML_IntegerVectorFormat)
v2int32 = vector_format_builder("ml_int2_t", "int2", 2, ML_Int32, compound_constructor = ML_IntegerVectorFormat)
v3int32 = vector_format_builder("ml_int3_t", "int3", 3, ML_Int32, compound_constructor = ML_IntegerVectorFormat)
v4int32 = vector_format_builder("ml_int4_t", "int4", 4, ML_Int32, compound_constructor = ML_IntegerVectorFormat)
v8int32 = vector_format_builder("ml_int8_t", "int8", 8, ML_Int32, compound_constructor = ML_IntegerVectorFormat)
v2uint32 = vector_format_builder("ml_uint2_t", "uint2", 2, ML_UInt32, compound_constructor = ML_IntegerVectorFormat)
v3uint32 = vector_format_builder("ml_uint3_t", "uint3", 3, ML_UInt32, compound_constructor = ML_IntegerVectorFormat)
v4uint32 = vector_format_builder("ml_uint4_t", "uint4", 4, ML_UInt32, compound_constructor = ML_IntegerVectorFormat)
v8uint32 = vector_format_builder("ml_uint8_t", "uint8", 8, ML_UInt32, compound_constructor = ML_IntegerVectorFormat)
v2int64 = vector_format_builder("ml_long2_t", "long2", 2, ML_Int64, compound_constructor = ML_IntegerVectorFormat)
v3int64 = vector_format_builder("ml_long3_t", "long3", 3, ML_Int64, compound_constructor = ML_IntegerVectorFormat)
v4int64 = vector_format_builder("ml_long4_t", "long4", 4, ML_Int64, compound_constructor = ML_IntegerVectorFormat)
v8int64 = vector_format_builder("ml_long8_t", "long8", 8, ML_Int64, compound_constructor = ML_IntegerVectorFormat)
v2uint64 = vector_format_builder("ml_ulong2_t", "ulong2", 2, ML_UInt64, compound_constructor = ML_IntegerVectorFormat)
v3uint64 = vector_format_builder("ml_ulong3_t", "ulong3", 3, ML_UInt64, compound_constructor = ML_IntegerVectorFormat)
v4uint64 = vector_format_builder("ml_ulong4_t", "ulong4", 4, ML_UInt64, compound_constructor = ML_IntegerVectorFormat)
v8uint64 = vector_format_builder("ml_ulong8_t", "ulong8", 8, ML_UInt64, compound_constructor = ML_IntegerVectorFormat)
###############################################################################
# VECTOR MULTI-PRECISION FORMAT
###############################################################################
v2dualfloat32 = vector_format_builder("ml_dualfloat2_t", "unsupported", 2, ML_SingleSingle, compound_constructor=ML_MultiPrecision_VectorFormat)
v3dualfloat32 = vector_format_builder("ml_dualfloat3_t", "unsupported", 3, ML_SingleSingle, compound_constructor=ML_MultiPrecision_VectorFormat)
v4dualfloat32 = vector_format_builder("ml_dualfloat4_t", "unsupported", 4, ML_SingleSingle, compound_constructor=ML_MultiPrecision_VectorFormat)
v8dualfloat32 = vector_format_builder("ml_dualfloat8_t", "unsupported", 8, ML_SingleSingle, compound_constructor=ML_MultiPrecision_VectorFormat)
v2dualfloat64 = vector_format_builder("ml_dualdouble2_t", "unsupported", 2, ML_DoubleDouble, compound_constructor=ML_MultiPrecision_VectorFormat)
v3dualfloat64 = vector_format_builder("ml_dualdouble3_t", "unsupported", 3, ML_DoubleDouble, compound_constructor=ML_MultiPrecision_VectorFormat)
v4dualfloat64 = vector_format_builder("ml_dualdouble4_t", "unsupported", 4, ML_DoubleDouble, compound_constructor=ML_MultiPrecision_VectorFormat)
v8dualfloat64 = vector_format_builder("ml_dualdouble8_t", "unsupported", 8, ML_DoubleDouble, compound_constructor=ML_MultiPrecision_VectorFormat)
v2trifloat32 = vector_format_builder("ml_trifloat2_t", "unsupported", 2, ML_TripleSingle, compound_constructor=ML_MultiPrecision_VectorFormat)
v3trifloat32 = vector_format_builder("ml_trifloat3_t", "unsupported", 3, ML_TripleSingle, compound_constructor=ML_MultiPrecision_VectorFormat)
v4trifloat32 = vector_format_builder("ml_trifloat4_t", "unsupported", 4, ML_TripleSingle, compound_constructor=ML_MultiPrecision_VectorFormat)
v8trifloat32 = vector_format_builder("ml_trifloat8_t", "unsupported", 8, ML_TripleSingle, compound_constructor=ML_MultiPrecision_VectorFormat)
v2trifloat64 = vector_format_builder("ml_tridouble2_t", "unsupported", 2, ML_TripleDouble, compound_constructor=ML_MultiPrecision_VectorFormat)
v3trifloat64 = vector_format_builder("ml_tridouble3_t", "unsupported", 3, ML_TripleDouble, compound_constructor=ML_MultiPrecision_VectorFormat)
v4trifloat64 = vector_format_builder("ml_tridouble4_t", "unsupported", 4, ML_TripleDouble, compound_constructor=ML_MultiPrecision_VectorFormat)
v8trifloat64 = vector_format_builder("ml_tridouble8_t", "unsupported", 8, ML_TripleDouble, compound_constructor=ML_MultiPrecision_VectorFormat)
LIST_SINGLE_MULTI_PRECISION_VECTOR_FORMATS = [v2dualfloat32, v3dualfloat32, v4dualfloat32, v8dualfloat32, v2trifloat32, v3trifloat32, v4trifloat32, v8trifloat32]
LIST_DOUBLE_MULTI_PRECISION_VECTOR_FORMATS = [v2dualfloat64, v3dualfloat64, v4dualfloat64, v8dualfloat64, v2trifloat64, v3trifloat64, v4trifloat64, v8trifloat64]
VECTOR_TYPE_MAP = {
ML_Binary32: {
2: v2float32,
3: v3float32,
4: v4float32,
8: v8float32
},
ML_Binary64: {
2: v2float64,
3: v3float64,
4: v4float64,
8: v8float64
},
ML_Int32: {
2: v2int32,
3: v3int32,
4: v4int32,
8: v8int32
},
ML_UInt32: {
2: v2uint32,
3: v3uint32,
4: v4uint32,
8: v8uint32
},
ML_Int64: {
2: v2int64,
3: v3int64,
4: v4int64,
8: v8int64
},
ML_UInt64: {
2: v2uint64,
3: v3uint64,
4: v4uint64,
8: v8uint64
},
ML_Bool: {
2: v2bool,
3: v3bool,
4: v4bool,
8: v8bool
},
ML_SingleSingle: {
2: v2dualfloat32,
3: v3dualfloat32,
4: v4dualfloat32,
8: v8dualfloat32,
},
ML_TripleSingle: {
        2: v2trifloat32,
        3: v3trifloat32,
        4: v4trifloat32,
        8: v8trifloat32,
},
ML_DoubleDouble: {
2: v2dualfloat64,
3: v3dualfloat64,
4: v4dualfloat64,
8: v8dualfloat64,
},
ML_TripleDouble: {
2: v2trifloat64,
3: v3trifloat64,
4: v4trifloat64,
8: v8trifloat64,
},
}
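# Illustrative sketch (added for clarity, not part of the original source):
# VECTOR_TYPE_MAP is keyed first by scalar format, then by vector size.
def _example_vector_type_lookup():
    assert VECTOR_TYPE_MAP[ML_Binary32][4] is v4float32
    assert VECTOR_TYPE_MAP[ML_Int64][2] is v2int64
    assert VECTOR_TYPE_MAP[ML_DoubleDouble][8] is v8dualfloat64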
###############################################################################
# GENERIC, NON NUMERICAL FORMATS
###############################################################################
ML_Void = ML_FormatConstructor(0, "void", "ERROR", lambda _: None)
class FP_Context(object):
""" Floating-Point context """
def __init__(self, rounding_mode = ML_GlobalRoundMode, silent = None):
self.rounding_mode = rounding_mode
self.init_ev_value = None
self.init_rnd_mode_value = None
self.silent = silent
def get_rounding_mode(self):
return self.rounding_mode
def get_silent(self):
return self.silent
class FunctionFormat(object):
""" format for function object """
pass
def merge_abstract_format(*args):
""" return the most generic abstract format
to unify args formats """
has_float = False
has_integer = False
has_bool = False
for arg_type in args:
arg_base = arg_type.get_base_format()
if isinstance(arg_base, ML_FP_Format): has_float = True
if isinstance(arg_base, ML_Fixed_Format): has_integer = True
if isinstance(arg_base, ML_Bool_Format): has_bool = True
if has_float: return ML_Float
if has_integer: return ML_Integer
if has_bool: return ML_AbstractBool
else:
print([str(arg) for arg in args])
Log.report(Log.Error, "unknown formats while merging abstract format tuple")
## @}
# end of metalibm's Doxygen ml_formats group
| 39.740933
| 166
| 0.669003
|
e2e6b3a7df083bc5d048f422539e2cd1ee15dddd
| 5,893
|
py
|
Python
|
src/reducer/testing.py
|
fritzo/pomagma
|
224bb6adab3fc68e2d853e6365b4b86a8f7f468f
|
[
"Apache-2.0"
] | 10
|
2015-06-09T00:25:01.000Z
|
2019-06-11T16:07:31.000Z
|
src/reducer/testing.py
|
fritzo/pomagma
|
224bb6adab3fc68e2d853e6365b4b86a8f7f468f
|
[
"Apache-2.0"
] | 25
|
2015-03-23T23:16:01.000Z
|
2017-08-29T03:35:59.000Z
|
src/reducer/testing.py
|
fritzo/pomagma
|
224bb6adab3fc68e2d853e6365b4b86a8f7f468f
|
[
"Apache-2.0"
] | null | null | null |
"""Tools for testing implementations of reduce() and simplify()."""
import os
from importlib import import_module
import hypothesis.strategies as s
import pytest
from parsable import parsable
from pomagma.reducer import bohm
from pomagma.reducer.linker import link
from pomagma.reducer.syntax import (APP, BOOL, BOT, CODE, EQUAL, EVAL, JOIN,
MAYBE, NVAR, QAPP, QEQUAL, QLESS, QQUOTE,
QUOTE, TOP, UNIT, B, C, I, K, S, Y, is_app,
is_equal, is_quote, sexpr_parse,
sexpr_print)
DIR = os.path.dirname(os.path.abspath(__file__))
TESTDATA = os.path.join(DIR, 'testdata')
# ----------------------------------------------------------------------------
# parameterized testing
def iter_test_cases(test_id, suites=None):
assert isinstance(test_id, str), test_id
print('test_id = {}'.format(test_id))
if suites is None:
module = import_module('pomagma.reducer.{}'.format(test_id))
suites = module.SUPPORTED_TESTDATA
for suite in suites:
basename = '{}.sexpr'.format(suite)
filename = os.path.join(TESTDATA, basename)
print('reading {}'.format(filename))
with open(filename) as f:
for i, line in enumerate(f):
parts = line.split(';', 1)
sexpr = parts[0].strip()
if sexpr:
message = 'In {}:{}\n{}'.format(basename, 1 + i, line)
try:
term = sexpr_parse(sexpr)
except ValueError as e:
raise ValueError('{} {}'.format(message, e))
comment = None if len(parts) < 2 else parts[1].strip()
yield term, comment, message
def parse_xfail(comment, test_id):
if comment.startswith('xfail'):
if test_id is None:
return True
if test_id in comment[len('xfail'):].strip().split(' '):
return True
return False
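# Illustrative sketch (added for clarity, not part of the original file): an
# 'xfail' comment either applies to every test_id or only to the ids it lists.
def _example_parse_xfail():
    assert parse_xfail('xfail', None)
    assert parse_xfail('xfail bohm engine', 'bohm')
    assert not parse_xfail('xfail bohm', 'engine')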
def iter_equations(test_id, suites=None):
assert isinstance(test_id, str), test_id
for term, comment, message in iter_test_cases(test_id, suites):
if is_equal(term):
lhs = link(bohm.convert(term[1]))
rhs = link(bohm.convert(term[2]))
example = lhs, rhs, message
if comment and parse_xfail(comment, test_id):
example = pytest.mark.xfail(example)
yield example
else:
raise NotImplementedError(message)
def migrate(fun):
"""Applies a term->term transform on all files in testdata/."""
for basename in os.listdir(TESTDATA):
assert basename.endswith('.sexpr'), basename
print('processing {}'.format(basename))
filename = os.path.join(TESTDATA, basename)
lines = []
with open(filename) as f:
for lineno, line in enumerate(f):
line = line.strip()
parts = line.split(';', 1)
sexpr = parts[0].strip()
comment = '' if len(parts) == 1 else parts[1]
if sexpr:
term = sexpr_parse(sexpr)
try:
term = fun(term)
except Exception:
print('Error at {}:{}'.format(basename, lineno + 1))
print(line)
raise
sexpr = sexpr_print(term)
if not comment:
line = sexpr
elif not sexpr:
line = ';{}'.format(comment)
else:
line = '{} ;{}'.format(sexpr, comment)
lines.append(line)
with open(filename, 'w') as f:
for line in lines:
f.write(line)
f.write('\n')
print('done')
@parsable
def reformat():
"""Reformat all files in testdata/."""
migrate(lambda x: x)
def _unquote_equal(term):
if not is_app(term) or not is_app(term[1]) or term[1][1] is not EQUAL:
return term
lhs = term[1][2]
rhs = term[2]
assert is_quote(lhs), lhs
assert is_quote(rhs), rhs
lhs = lhs[1]
rhs = rhs[1]
return APP(APP(EQUAL, lhs), rhs)
@parsable
def unquote_equal():
"""Convert (EQUAL (QUOTE x) (QUOTE y)) to (EQUAL x y)."""
migrate(_unquote_equal)
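# Illustrative sketch (added for clarity, not part of the original file),
# mirroring the docstring above with hypothetical variables x and y.
def _example_unquote_equal():
    x, y = NVAR('x'), NVAR('y')
    quoted = APP(APP(EQUAL, QUOTE(x)), QUOTE(y))
    assert _unquote_equal(quoted) == APP(APP(EQUAL, x), y)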
# ----------------------------------------------------------------------------
# property-based testing
alphabet = '_abcdefghijklmnopqrstuvwxyz'
s_vars = s.builds(
NVAR,
s.builds(str, s.text(alphabet=alphabet, min_size=1, average_size=5)),
)
s_atoms = s.one_of(
s.one_of(s_vars),
s.just(TOP),
s.just(BOT),
s.just(I),
s.just(K),
s.just(B),
s.just(C),
s.just(S),
s.just(Y),
s.one_of(
s.just(CODE),
s.just(EVAL),
s.just(QAPP),
s.just(QQUOTE),
s.just(QEQUAL),
s.just(QLESS),
),
s.one_of(
s.just(UNIT),
s.just(BOOL),
s.just(MAYBE),
),
)
s_sk_atoms = s.one_of(
s.one_of(s_vars),
s.just(TOP),
s.just(BOT),
s.just(I),
s.just(K),
s.just(B),
s.just(C),
s.just(S),
s.just(Y),
)
def s_sk_extend(terms):
return s.builds(APP, terms, terms)
def s_skj_extend(terms):
return s.one_of(
s.builds(APP, terms, terms),
s.builds(JOIN, terms, terms),
)
def s_terms_extend(terms):
return s.one_of(
s.builds(APP, terms, terms),
s.builds(JOIN, terms, terms),
s.builds(QUOTE, terms),
)
s_sk_terms = s.recursive(s_sk_atoms, s_sk_extend, max_leaves=100)
s_skj_terms = s.recursive(s_sk_atoms, s_skj_extend, max_leaves=100)
s_terms = s.recursive(s_atoms, s_terms_extend, max_leaves=100)
s_quoted = s.builds(QUOTE, s_terms)
if __name__ == '__main__':
parsable()
| 28.746341
| 79
| 0.53029
|
fd8f924a012561f87eb0cdddbe741ff8d7b0151d
| 8,525
|
py
|
Python
|
examples/tests/test_example_aircraft.py
|
GodotMisogi/openconcept
|
8206cb2d4303cb83e89d4075649bf69118384486
|
[
"MIT"
] | 22
|
2018-09-11T16:40:23.000Z
|
2022-03-22T09:45:53.000Z
|
examples/tests/test_example_aircraft.py
|
GodotMisogi/openconcept
|
8206cb2d4303cb83e89d4075649bf69118384486
|
[
"MIT"
] | 25
|
2018-12-16T20:32:17.000Z
|
2022-03-16T14:53:07.000Z
|
examples/tests/test_example_aircraft.py
|
GodotMisogi/openconcept
|
8206cb2d4303cb83e89d4075649bf69118384486
|
[
"MIT"
] | 24
|
2018-07-27T06:31:01.000Z
|
2022-03-13T15:27:07.000Z
|
from __future__ import division
import unittest
import numpy as np
from openmdao.utils.assert_utils import assert_near_equal, assert_check_partials
from openmdao.api import IndepVarComp, Group, Problem
from examples.B738 import run_738_analysis
from examples.TBM850 import run_tbm_analysis
from examples.HybridTwin_thermal import run_hybrid_twin_thermal_analysis
from examples.HybridTwin_active_thermal import run_hybrid_twin_active_thermal_analysis
from examples.HybridTwin import run_hybrid_twin_analysis
from examples.Caravan import run_caravan_analysis
from examples.KingAirC90GT import run_kingair_analysis
from examples.ElectricSinglewithThermal import run_electricsingle_analysis
from examples.N3_HybridSingleAisle_Refrig import run_hybrid_sa_analysis
class TBMAnalysisTestCase(unittest.TestCase):
def setUp(self):
self.prob = run_tbm_analysis()
def test_values_TBM(self):
prob = self.prob
assert_near_equal(prob.get_val('climb.OEW', units='lb'), 4756.772140709275, tolerance=1e-5)
assert_near_equal(prob.get_val('rotate.range_final', units='ft'), 2489.49501148, tolerance=1e-5)
assert_near_equal(prob.get_val('engineoutclimb.gamma',units='deg'), 8.78263, tolerance=1e-5)
assert_near_equal(prob.get_val('descent.fuel_used_final', units='lb'), 1607.84846911, tolerance=1e-5)
class CaravanAnalysisTestCase(unittest.TestCase):
def setUp(self):
self.prob = run_caravan_analysis()
def test_values_Caravan(self):
prob = self.prob
assert_near_equal(prob.get_val('v1vr.range_final', units='ft'), 1375.59921952, tolerance=1e-5)
assert_near_equal(prob.get_val('descent.fuel_used_final', units='lb'), 379.90334044, tolerance=1e-5)
class HybridTwinThermalTestCase(unittest.TestCase):
def setUp(self):
self.prob = run_hybrid_twin_thermal_analysis()
def test_values_thermalhybridtwin(self):
prob = self.prob
assert_near_equal(prob.get_val('climb.OEW', units='lb'), 6673.001027260613, tolerance=1e-5)
assert_near_equal(prob.get_val('rotate.range_final', units='ft'), 4434.68545427, tolerance=1e-5)
assert_near_equal(prob.get_val('engineoutclimb.gamma',units='deg'), 1.75074018, tolerance=1e-5)
assert_near_equal(prob.get_val('descent.fuel_used_final', units='lb'), 862.69811822, tolerance=1e-5)
assert_near_equal(prob.get_val('descent.propmodel.batt1.SOC_final', units=None), -3.80158704e-05, tolerance=1e-5)
assert_near_equal(prob.get_val('climb.propmodel.motorheatsink.T', units='degC')[-1], 76.19938727507775, tolerance=1e-5)
assert_near_equal(prob.get_val('climb.propmodel.batteryheatsink.T', units='degC')[-1], -0.27586540922391123, tolerance=1e-5)
assert_near_equal(prob.get_val('cruise.propmodel.duct.drag', units='lbf')[0], 7.968332825694923, tolerance=1e-5)
# changelog 10/2020 - updated most of the values due to minor update to hydraulic diam calculation in the heat exchanger
# 10/2021 commenting out because does not converge with the new chiller in chiller.py
# class HybridTwinActiveThermalTestCase(unittest.TestCase):
# def setUp(self):
# self.prob = run_hybrid_twin_active_thermal_analysis()
# def test_values_hybridtwin(self):
# prob = self.prob
# climb_duct_area = np.array([ 0.80614565, 3.25480096, 7.11240858, 12.075577 , 17.55488029,
# 23.40694116, 29.15510781, 34.44182758, 39.05343787, 43.00420553, 46.43866073])
# cruise_duct_area = np.array([17.17611522, 15.22748148, 14.66271227, 14.38669164, 14.2745505 ,
# 14.20434496, 14.15713767, 14.11684779, 14.08034799, 14.04524349, 14.01099713])
# prob.set_val('climb.propmodel.duct.area_nozzle', climb_duct_area, units='inch**2')
# prob.set_val('cruise.propmodel.duct.area_nozzle', cruise_duct_area, units='inch**2')
# prob.run_model()
# assert_near_equal(prob.get_val('climb.OEW', units='lb'), 6673.001027260613, tolerance=1e-5)
# # assert_near_equal(prob.get_val('descent.fuel_used_final', units='lb'), 871.66394047, tolerance=1e-5)
# # assert_near_equal(prob.get_val('descent.propmodel.batt1.SOC_final', units=None), 0.00484123, tolerance=1e-5)
# assert_near_equal(prob.get_val('climb.propmodel.duct.area_nozzle', units='inch**2'), climb_duct_area, tolerance=1e-5)
# assert_near_equal(prob.get_val('cruise.propmodel.duct.area_nozzle', units='inch**2'), cruise_duct_area, tolerance=1e-5)
# Wdot = np.array([ 6618.15094465, 17863.48477045, 25558.10458551, 30652.72996714, 33805.46342847, 35538.5460011,
# 36221.44062722, 36149.9707508, 35539.35428109, 34562.89222503, 33346.05141285])
# assert_near_equal(prob.get_val('climb.propmodel.refrig.elec_load', units='W'), Wdot, tolerance=1e-5)
# assert_near_equal(prob.get_val('cruise.propmodel.refrig.elec_load', units='W'), np.zeros(11), tolerance=1e-5)
# assert_near_equal(prob.get_val('climb.propmodel.motorheatsink.T', units='degC')[-1], 76.48202028574951, tolerance=1e-5)
# assert_near_equal(prob.get_val('climb.propmodel.batteryheatsink.T', units='degC')[-1], 6.9112870295027165, tolerance=1e-5)
# assert_near_equal(prob.get_val('cruise.propmodel.duct.drag', units='lbf')[-1], 1.5888992670493287, tolerance=1e-5)
# # changelog 10/2020 - updated most of the values due to minor update to hydraulic diam calculation in the heat exchanger
class HybridTwinTestCase(unittest.TestCase):
def setUp(self):
self.prob = run_hybrid_twin_analysis()
def test_values_hybridtwin(self):
prob = self.prob
assert_near_equal(prob.get_val('climb.OEW', units='lb'), 6648.424765080086, tolerance=1e-5)
assert_near_equal(prob.get_val('rotate.range_final', units='ft'), 4383.871458066499, tolerance=1e-5)
assert_near_equal(prob.get_val('engineoutclimb.gamma',units='deg'), 1.7659046316724112, tolerance=1e-5)
assert_near_equal(prob.get_val('descent.fuel_used_final', units='lb'), 854.8937776195904, tolerance=1e-5)
assert_near_equal(prob.get_val('descent.propmodel.batt1.SOC_final', units=None), -0.00030626412, tolerance=1e-5)
class KingAirTestCase(unittest.TestCase):
def setUp(self):
self.prob = run_kingair_analysis()
def test_values_kingair(self):
prob = self.prob
assert_near_equal(prob.get_val('climb.OEW', units='lb'), 6471.539115423346, tolerance=1e-5)
assert_near_equal(prob.get_val('rotate.range_final', units='ft'), 3054.61279799, tolerance=1e-5)
assert_near_equal(prob.get_val('descent.fuel_used_final', units='lb'), 1666.73459582, tolerance=1e-5)
class ElectricSingleTestCase(unittest.TestCase):
def setUp(self):
self.prob = run_electricsingle_analysis()
def test_values_electricsingle(self):
prob = self.prob
assert_near_equal(prob.get_val('rotate.range_final', units='ft'), 2419.111568458725, tolerance=1e-5)
assert_near_equal(prob.get_val('descent.propmodel.batt1.SOC')[-1], 0.1663373102614198, tolerance=1e-5)
assert_near_equal(prob.get_val('descent.propmodel.motorheatsink.T', units='degC')[-1], 14.906950172494192, tolerance=1e-5)
# changelog 10/2020 - heat sink T now 14.90695 after minor change to hydraulic diameter computation in heat exchanger
class B738TestCase(unittest.TestCase):
def setUp(self):
self.prob = run_738_analysis()
def test_values_B738(self):
prob = self.prob
# block fuel
assert_near_equal(prob.get_val('descent.fuel_used_final', units='lbm'), 28549.432517, tolerance=3e-4)
# changelog: 9/2020 - previously 28688.329, updated CFM surrogate model to reject spurious high Mach, low altitude points
# total fuel
assert_near_equal(prob.get_val('loiter.fuel_used_final', units='lbm'), 34424.68533072, tolerance=3e-4)
# changelog: 9/2020 - previously 34555.313, updated CFM surrogate model to reject spurious high Mach, low altitude points
class N3HSATestCase(unittest.TestCase):
def setUp(self):
self.prob = run_hybrid_sa_analysis(plots=False)
def test_values_N3HSA(self):
prob = self.prob
# block fuel (no reserve, since the N+3 HSA uses the basic 3-phase mission)
assert_near_equal(prob.get_val('descent.fuel_used_final', units='lbm'), 9006.52397811, tolerance=1e-5)
if __name__=="__main__":
unittest.main()
| 58.793103
| 132
| 0.723988
|
400e45d208b03f3a8b991a2a7396c8f55cb3163d
| 833
|
py
|
Python
|
pyspedas/particles/spd_part_products/spd_pgs_make_e_spec.py
|
pulupa/pyspedas
|
7228199cf16eca2a27d130f1e4985ef1e69462ea
|
[
"MIT"
] | 75
|
2019-02-22T12:59:33.000Z
|
2022-02-26T15:33:20.000Z
|
pyspedas/particles/spd_part_products/spd_pgs_make_e_spec.py
|
pulupa/pyspedas
|
7228199cf16eca2a27d130f1e4985ef1e69462ea
|
[
"MIT"
] | 40
|
2019-07-02T07:46:34.000Z
|
2022-02-23T21:48:50.000Z
|
pyspedas/particles/spd_part_products/spd_pgs_make_e_spec.py
|
pulupa/pyspedas
|
7228199cf16eca2a27d130f1e4985ef1e69462ea
|
[
"MIT"
] | 43
|
2019-02-22T13:03:41.000Z
|
2022-01-24T19:26:59.000Z
|
import numpy as np
# use nanmean from bottleneck if it's installed, otherwise use the numpy one
try:
import bottleneck as bn
nanmean = bn.nanmean
except ImportError:
nanmean = np.nanmean
def spd_pgs_make_e_spec(data_in):
"""
Builds energy spectrogram from the particle data structure
Input:
data_in: dict
Particle data structure
Returns:
Tuple containing: (energy values for the y-axis, spectrogram values)
"""
data = data_in.copy()
# zero inactive bins to ensure areas with no data are represented as NaN
zero_bins = np.argwhere(data['bins'] == 0)
if zero_bins.size != 0:
for item in zero_bins:
data['data'][item[0], item[1]] = 0.0
ave = nanmean(data['data'], axis=1)
y = data['energy'][:, 0]
return (y, ave)
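# Illustrative usage sketch (added for clarity, not part of the original
# module), using a hypothetical 3-energy x 2-angle particle structure.
def _example_spd_pgs_make_e_spec():
    data = {
        'data': np.array([[1., 3.], [2., 4.], [5., 7.]]),
        'bins': np.array([[1, 1], [1, 0], [1, 1]]),
        'energy': np.array([[10., 10.], [20., 20.], [30., 30.]]),
    }
    y, ave = spd_pgs_make_e_spec(data)
    # the disabled bin is zeroed before averaging, so the middle value drops to 1.0
    assert list(y) == [10., 20., 30.]
    assert list(ave) == [2., 1., 6.]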
| 22.513514
| 76
| 0.632653
|
75846b7ecb6fcac2b940d28dd52eff4993076e88
| 15,593
|
py
|
Python
|
tests/models/test_cpu.py
|
ameliatqy/pytorch-lightning
|
ca18e11f6efe822098c79e7d9124b08a55bcd908
|
[
"Apache-2.0"
] | null | null | null |
tests/models/test_cpu.py
|
ameliatqy/pytorch-lightning
|
ca18e11f6efe822098c79e7d9124b08a55bcd908
|
[
"Apache-2.0"
] | null | null | null |
tests/models/test_cpu.py
|
ameliatqy/pytorch-lightning
|
ca18e11f6efe822098c79e7d9124b08a55bcd908
|
[
"Apache-2.0"
] | null | null | null |
import os
import platform
import pytest
import torch
from packaging.version import parse as version_parse
import tests.base.develop_pipelines as tpipes
import tests.base.develop_utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.core.step_result import TrainResult
from tests.base import EvalModelTemplate
def test_cpu_slurm_save_load(tmpdir):
"""Verify model save/load/checkpoint on CPU."""
hparams = EvalModelTemplate.get_default_hparams()
model = EvalModelTemplate(**hparams)
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
version = logger.version
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
logger=logger,
limit_train_batches=0.2,
limit_val_batches=0.2,
checkpoint_callback=ModelCheckpoint(tmpdir),
)
result = trainer.fit(model)
real_global_step = trainer.global_step
    # training complete
assert result == 1, 'cpu model failed to complete'
# predict with trained model before saving
# make a prediction
dataloaders = model.test_dataloader()
if not isinstance(dataloaders, list):
dataloaders = [dataloaders]
for dataloader in dataloaders:
for batch in dataloader:
break
x, y = batch
x = x.view(x.size(0), -1)
model.eval()
pred_before_saving = model(x)
# test HPC saving
# simulate snapshot on slurm
saved_filepath = trainer.hpc_save(trainer.weights_save_path, logger)
assert os.path.exists(saved_filepath)
# new logger file to get meta
logger = tutils.get_default_logger(tmpdir, version=version)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
logger=logger,
checkpoint_callback=ModelCheckpoint(tmpdir),
)
model = EvalModelTemplate(**hparams)
# set the epoch start hook so we can predict before the model does the full training
def assert_pred_same():
assert trainer.global_step == real_global_step and trainer.global_step > 0
# predict with loaded model to make sure answers are the same
trainer.model.eval()
new_pred = trainer.model(x)
assert torch.all(torch.eq(pred_before_saving, new_pred)).item() == 1
model.on_epoch_start = assert_pred_same
# by calling fit again, we trigger training, loading weights from the cluster
# and our hook to predict using current model before any more weight updates
trainer.fit(model)
def test_early_stopping_cpu_model(tmpdir):
"""Test each of the trainer options."""
stopping = EarlyStopping(monitor='val_loss', min_delta=0.1)
trainer_options = dict(
default_root_dir=tmpdir,
early_stop_callback=stopping,
max_epochs=2,
gradient_clip_val=1.0,
overfit_batches=0.20,
track_grad_norm=2,
limit_train_batches=0.1,
limit_val_batches=0.1,
)
model = EvalModelTemplate()
tpipes.run_model_test(trainer_options, model, on_gpu=False)
# test freeze on cpu
model.freeze()
model.unfreeze()
@pytest.mark.skipif(platform.system() == "Windows",
reason="Distributed training is not supported on Windows")
@pytest.mark.skipif((platform.system() == "Darwin" and
version_parse(torch.__version__) < version_parse("1.3.0")),
reason="Distributed training is not supported on MacOS before Torch 1.3.0")
def test_multi_cpu_model_ddp(tmpdir):
"""Make sure DDP works."""
tutils.set_random_master_port()
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=1,
limit_train_batches=0.4,
limit_val_batches=0.2,
gpus=None,
num_processes=2,
distributed_backend='ddp_cpu',
)
model = EvalModelTemplate()
tpipes.run_model_test(trainer_options, model, on_gpu=False)
def test_lbfgs_cpu_model(tmpdir):
"""Test each of the trainer options."""
trainer_options = dict(
default_root_dir=tmpdir,
max_epochs=1,
progress_bar_refresh_rate=0,
weights_summary='top',
limit_train_batches=0.2,
limit_val_batches=0.2,
)
hparams = EvalModelTemplate.get_default_hparams()
hparams.update(optimizer_name='lbfgs',
learning_rate=0.004)
model = EvalModelTemplate(**hparams)
model.configure_optimizers = model.configure_optimizers__lbfgs
tpipes.run_model_test_without_loggers(trainer_options, model, min_acc=0.25)
def test_default_logger_callbacks_cpu_model(tmpdir):
"""Test each of the trainer options."""
trainer_options = dict(
default_root_dir=tmpdir,
max_epochs=1,
gradient_clip_val=1.0,
overfit_batches=0.20,
progress_bar_refresh_rate=0,
limit_train_batches=0.01,
limit_val_batches=0.01,
)
model = EvalModelTemplate()
tpipes.run_model_test_without_loggers(trainer_options, model)
# test freeze on cpu
model.freeze()
model.unfreeze()
def test_running_test_after_fitting(tmpdir):
"""Verify test() on fitted model."""
model = EvalModelTemplate()
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
# logger file to get weights
checkpoint = tutils.init_checkpoint_callback(logger)
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=2,
limit_train_batches=0.4,
limit_val_batches=0.2,
limit_test_batches=0.2,
checkpoint_callback=checkpoint,
logger=logger,
)
result = trainer.fit(model)
assert result == 1, 'training failed to complete'
trainer.test()
# test we have good test accuracy
tutils.assert_ok_model_acc(trainer, thr=0.5)
def test_running_test_no_val(tmpdir):
"""Verify `test()` works on a model with no `val_loader`."""
model = EvalModelTemplate()
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
# logger file to get weights
checkpoint = tutils.init_checkpoint_callback(logger)
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=1,
limit_train_batches=0.4,
limit_val_batches=0.2,
limit_test_batches=0.2,
checkpoint_callback=checkpoint,
logger=logger,
early_stop_callback=False,
)
result = trainer.fit(model)
assert result == 1, 'training failed to complete'
trainer.test()
# test we have good test accuracy
tutils.assert_ok_model_acc(trainer)
def test_simple_cpu(tmpdir):
"""Verify continue training session on CPU."""
model = EvalModelTemplate()
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_val_batches=0.1,
limit_train_batches=20,
)
result = trainer.fit(model)
    # training complete
    assert result == 1, 'cpu model failed to complete'
def test_cpu_model(tmpdir):
"""Make sure model trains on CPU."""
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=1,
limit_train_batches=0.4,
limit_val_batches=0.4
)
model = EvalModelTemplate()
tpipes.run_model_test(trainer_options, model, on_gpu=False)
def test_all_features_cpu_model(tmpdir):
"""Test each of the trainer options."""
trainer_options = dict(
default_root_dir=tmpdir,
gradient_clip_val=1.0,
overfit_batches=0.20,
track_grad_norm=2,
progress_bar_refresh_rate=0,
accumulate_grad_batches=2,
max_epochs=1,
limit_train_batches=0.4,
limit_val_batches=0.4
)
model = EvalModelTemplate()
tpipes.run_model_test(trainer_options, model, on_gpu=False)
def test_tbptt_cpu_model(tmpdir):
"""Test truncated back propagation through time works."""
truncated_bptt_steps = 2
sequence_size = 30
batch_size = 30
x_seq = torch.rand(batch_size, sequence_size, 1)
y_seq_list = torch.rand(batch_size, sequence_size, 1).tolist()
class MockSeq2SeqDataset(torch.utils.data.Dataset):
def __getitem__(self, i):
return x_seq, y_seq_list
def __len__(self):
return 1
class BpttTestModel(EvalModelTemplate):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.test_hidden = None
def training_step(self, batch, batch_idx, hiddens):
assert hiddens == self.test_hidden, "Hidden state not persistent between tbptt steps"
self.test_hidden = torch.rand(1)
x_tensor, y_list = batch
assert x_tensor.shape[1] == truncated_bptt_steps, "tbptt split Tensor failed"
y_tensor = torch.tensor(y_list, dtype=x_tensor.dtype)
assert y_tensor.shape[1] == truncated_bptt_steps, "tbptt split list failed"
pred = self(x_tensor.view(batch_size, truncated_bptt_steps))
loss_val = torch.nn.functional.mse_loss(
pred, y_tensor.view(batch_size, truncated_bptt_steps))
return {
'loss': loss_val,
'hiddens': self.test_hidden,
}
def training_epoch_end(self, training_step_outputs):
training_step_outputs = training_step_outputs[0]
assert len(training_step_outputs) == (sequence_size / truncated_bptt_steps)
loss = torch.stack([x['loss'] for x in training_step_outputs]).mean()
return {'log': {'train_loss': loss}}
def train_dataloader(self):
return torch.utils.data.DataLoader(
dataset=MockSeq2SeqDataset(),
batch_size=batch_size,
shuffle=False,
sampler=None,
)
hparams = EvalModelTemplate.get_default_hparams()
hparams.update(
batch_size=batch_size,
in_features=truncated_bptt_steps,
hidden_dim=truncated_bptt_steps,
out_features=truncated_bptt_steps
)
model = BpttTestModel(**hparams)
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
truncated_bptt_steps=truncated_bptt_steps,
limit_val_batches=0,
weights_summary=None,
early_stop_callback=False,
)
result = trainer.fit(model)
assert result == 1, 'training failed to complete'
def test_tbptt_cpu_model_result(tmpdir):
"""Test truncated back propagation through time works."""
truncated_bptt_steps = 2
sequence_size = 30
batch_size = 30
x_seq = torch.rand(batch_size, sequence_size, 1)
y_seq_list = torch.rand(batch_size, sequence_size, 1).tolist()
class MockSeq2SeqDataset(torch.utils.data.Dataset):
def __getitem__(self, i):
return x_seq, y_seq_list
def __len__(self):
return 1
class BpttTestModel(EvalModelTemplate):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.test_hidden = None
def training_step(self, batch, batch_idx, hiddens):
assert hiddens == self.test_hidden, "Hidden state not persistent between tbptt steps"
self.test_hidden = torch.rand(1)
x_tensor, y_list = batch
assert x_tensor.shape[1] == truncated_bptt_steps, "tbptt split Tensor failed"
y_tensor = torch.tensor(y_list, dtype=x_tensor.dtype)
assert y_tensor.shape[1] == truncated_bptt_steps, "tbptt split list failed"
pred = self(x_tensor.view(batch_size, truncated_bptt_steps))
loss_val = torch.nn.functional.mse_loss(
pred, y_tensor.view(batch_size, truncated_bptt_steps))
result = TrainResult(loss_val, hiddens=self.test_hidden)
return result
def training_epoch_end(self, training_step_outputs):
result = training_step_outputs
assert isinstance(result, TrainResult)
assert result.minimize.size(1) == (sequence_size / truncated_bptt_steps)
result.minimize = result.minimize.mean()
return result
def train_dataloader(self):
return torch.utils.data.DataLoader(
dataset=MockSeq2SeqDataset(),
batch_size=batch_size,
shuffle=False,
sampler=None,
)
hparams = EvalModelTemplate.get_default_hparams()
hparams.update(
batch_size=batch_size,
in_features=truncated_bptt_steps,
hidden_dim=truncated_bptt_steps,
out_features=truncated_bptt_steps
)
model = BpttTestModel(**hparams)
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
truncated_bptt_steps=truncated_bptt_steps,
limit_val_batches=0,
weights_summary=None,
early_stop_callback=False,
)
result = trainer.fit(model)
assert result == 1, 'training failed to complete'
def test_tbptt_cpu_model_result_auto_reduce(tmpdir):
"""Test truncated back propagation through time works."""
truncated_bptt_steps = 2
sequence_size = 30
batch_size = 30
x_seq = torch.rand(batch_size, sequence_size, 1)
y_seq_list = torch.rand(batch_size, sequence_size, 1).tolist()
class MockSeq2SeqDataset(torch.utils.data.Dataset):
def __getitem__(self, i):
return x_seq, y_seq_list
def __len__(self):
return 1
class BpttTestModel(EvalModelTemplate):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.test_hidden = None
def training_step(self, batch, batch_idx, hiddens):
assert hiddens == self.test_hidden, "Hidden state not persistent between tbptt steps"
self.test_hidden = torch.rand(1)
x_tensor, y_list = batch
assert x_tensor.shape[1] == truncated_bptt_steps, "tbptt split Tensor failed"
y_tensor = torch.tensor(y_list, dtype=x_tensor.dtype)
assert y_tensor.shape[1] == truncated_bptt_steps, "tbptt split list failed"
pred = self(x_tensor.view(batch_size, truncated_bptt_steps))
loss_val = torch.nn.functional.mse_loss(
pred, y_tensor.view(batch_size, truncated_bptt_steps))
result = TrainResult(loss_val, hiddens=self.test_hidden)
return result
def train_dataloader(self):
return torch.utils.data.DataLoader(
dataset=MockSeq2SeqDataset(),
batch_size=batch_size,
shuffle=False,
sampler=None,
)
hparams = EvalModelTemplate.get_default_hparams()
hparams.update(
batch_size=batch_size,
in_features=truncated_bptt_steps,
hidden_dim=truncated_bptt_steps,
out_features=truncated_bptt_steps
)
model = BpttTestModel(**hparams)
# fit model
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
truncated_bptt_steps=truncated_bptt_steps,
limit_val_batches=0,
weights_summary=None,
early_stop_callback=False,
)
result = trainer.fit(model)
assert result == 1, 'training failed to complete'
| 30.57451
| 97
| 0.660104
|
c15715e95a2874c6fd7c0b9e56e193c99a46dcdb
| 1,744
|
py
|
Python
|
src/lib/network.py
|
Darksider3/monitorNet
|
1c0419ee6cb3191c5aaa347ffe0fcffc4992f391
|
[
"BSD-2-Clause"
] | null | null | null |
src/lib/network.py
|
Darksider3/monitorNet
|
1c0419ee6cb3191c5aaa347ffe0fcffc4992f391
|
[
"BSD-2-Clause"
] | 6
|
2017-02-18T22:25:56.000Z
|
2017-03-08T10:30:34.000Z
|
src/lib/network.py
|
Darksider3/monitorNet
|
1c0419ee6cb3191c5aaa347ffe0fcffc4992f391
|
[
"BSD-2-Clause"
] | null | null | null |
import requests
import time
import os
import colors
class network:
def __init__(self, host, httpTimeout=3, pingTimeout=1000, sleepTimer=10, httpExceptionTimer=2, defaultProt="https://"):
self.httpTimeout = httpTimeout
self.pingTimeout = pingTimeout
self.sleepTimer = sleepTimer
self.httpExceptionTimer = httpExceptionTimer
self.host=host
self.defaultProt=defaultProt
self.__checkHost__()
self.score = 0
def __checkHost__(self):
if "https://" not in self.host:
if "http://" not in self.host:
self.prot=self.defaultProt
self.host = self.defaultProt + self.host
else:
self.prot="http://"
else:
self.prot="https://"
def ping(self):
# strip the protocol from the string
self.host=self.host.replace(self.prot, '')
# ping
timeoutparam = "-w %s" % self.pingTimeout if colors.OS == "NT" else ""
param = "-n 1" if colors.OS == "NT" else "-c 1"
out = ">NUL" if colors.OS == "NT" else "2>&1 /dev/null"
status=os.system("ping %s %s %s %s" % (timeoutparam, param, self.host, out)) == 0
# revert changes
self.host=self.prot+self.host
return status
def httpsUp(self):
try:
requests.get(self.host, verify=True, timeout=self.httpTimeout)
return True
except requests.exceptions.RequestException as e:
time.sleep(self.httpExceptionTimer)
return False
def __scoreAdd__(self, num):
self.score += num
def __scoreSub__(self, num):
self.score -= num
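# Illustrative usage sketch (added for clarity, not part of the original
# module); "example.org" is a hypothetical host and the checks need network access.
def _example_network_usage():
    mon = network("example.org")          # protocol is prepended automatically
    assert mon.host == "https://example.org"
    return mon.ping(), mon.httpsUp()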
| 29.559322
| 124
| 0.563647
|
cd6d3d2e70f1916d306543fe3214c8476a9f1558
| 15,185
|
py
|
Python
|
python/mxnet/image.py
|
bill-teng/mxnet-test
|
39a2c0cff1be8b8277b2e0a8c55214acc186a49c
|
[
"Apache-2.0"
] | 2
|
2017-05-31T02:18:53.000Z
|
2021-04-08T04:50:32.000Z
|
python/mxnet/image.py
|
bill-teng/mxnet-test
|
39a2c0cff1be8b8277b2e0a8c55214acc186a49c
|
[
"Apache-2.0"
] | null | null | null |
python/mxnet/image.py
|
bill-teng/mxnet-test
|
39a2c0cff1be8b8277b2e0a8c55214acc186a49c
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# pylint: disable=no-member, too-many-lines, redefined-builtin, protected-access, unused-import, invalid-name
# pylint: disable=too-many-arguments, too-many-locals, no-name-in-module, too-many-branches, too-many-statements
"""Image IO API of mxnet."""
from __future__ import absolute_import, print_function
from .base import numeric_types
import os
import random
import logging
import numpy as np
try:
import cv2
except ImportError:
cv2 = None
from . import ndarray as nd
from . import _ndarray_internal as _internal
from ._ndarray_internal import _cvimresize as imresize
from ._ndarray_internal import _cvcopyMakeBorder as copyMakeBorder
from . import io
from . import recordio
def imdecode(buf, **kwargs):
"""Decode an image from string. Requires OpenCV to work.
Parameters
----------
buf : str/bytes, or numpy.ndarray
Binary image data.
flag : int
0 for grayscale. 1 for colored.
to_rgb : int
0 for BGR format (OpenCV default). 1 for RGB format (MXNet default).
out : NDArray
Output buffer. Use None for automatic allocation.
"""
if not isinstance(buf, nd.NDArray):
buf = nd.array(np.frombuffer(buf, dtype=np.uint8), dtype=np.uint8)
return _internal._cvimdecode(buf, **kwargs)
def scale_down(src_size, size):
"""Scale down crop size if it's bigger than image size"""
w, h = size
sw, sh = src_size
if sh < h:
w, h = float(w*sh)/h, sh
if sw < w:
w, h = sw, float(h*sw)/w
return int(w), int(h)
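# Illustrative sketch (added for clarity, not part of the original module):
# a 300x300 crop request is shrunk to fit inside a 200x100 source.
def _example_scale_down():
    assert scale_down((200, 100), (300, 300)) == (100, 100)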
def resize_short(src, size, interp=2):
"""Resize shorter edge to size"""
h, w, _ = src.shape
if h > w:
new_h, new_w = size*h/w, size
else:
new_h, new_w = size, size*w/h
return imresize(src, new_w, new_h, interp=interp)
def fixed_crop(src, x0, y0, w, h, size=None, interp=2):
"""Crop src at fixed location, and (optionally) resize it to size"""
out = nd.crop(src, begin=(y0, x0, 0), end=(y0+h, x0+w, int(src.shape[2])))
if size is not None and (w, h) != size:
out = imresize(out, *size, interp=interp)
return out
def random_crop(src, size, interp=2):
"""Randomly crop src with size. Upsample result if src is smaller than size"""
h, w, _ = src.shape
new_w, new_h = scale_down((w, h), size)
x0 = random.randint(0, w - new_w)
y0 = random.randint(0, h - new_h)
out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
return out, (x0, y0, new_w, new_h)
def center_crop(src, size, interp=2):
"""Randomly crop src with size. Upsample result if src is smaller than size"""
h, w, _ = src.shape
new_w, new_h = scale_down((w, h), size)
x0 = (w - new_w)/2
y0 = (h - new_h)/2
out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
return out, (x0, y0, new_w, new_h)
def color_normalize(src, mean, std=None):
"""Normalize src with mean and std"""
src -= mean
if std is not None:
src /= std
return src
def random_size_crop(src, size, min_area, ratio, interp=2):
"""Randomly crop src with size. Randomize area and aspect ratio"""
h, w, _ = src.shape
new_ratio = random.uniform(*ratio)
if new_ratio * h > w:
max_area = w*int(w/new_ratio)
else:
max_area = h*int(h*new_ratio)
min_area *= h*w
if max_area < min_area:
return random_crop(src, size, interp)
new_area = random.uniform(min_area, max_area)
new_w = int(np.sqrt(new_area*new_ratio))
new_h = int(np.sqrt(new_area/new_ratio))
assert new_w <= w and new_h <= h
x0 = random.randint(0, w - new_w)
y0 = random.randint(0, h - new_h)
out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
return out, (x0, y0, new_w, new_h)
def ResizeAug(size, interp=2):
"""Make resize shorter edge to size augumenter"""
def aug(src):
"""Augumenter body"""
return [resize_short(src, size, interp)]
return aug
def RandomCropAug(size, interp=2):
"""Make random crop augumenter"""
def aug(src):
"""Augumenter body"""
return [random_crop(src, size, interp)[0]]
return aug
def RandomSizedCropAug(size, min_area, ratio, interp=2):
"""Make random crop with random resizing and random aspect ratio jitter augumenter"""
def aug(src):
"""Augumenter body"""
return [random_size_crop(src, size, min_area, ratio, interp)[0]]
return aug
def CenterCropAug(size, interp=2):
"""Make center crop augmenter"""
def aug(src):
"""Augumenter body"""
return [center_crop(src, size, interp)[0]]
return aug
def RandomOrderAug(ts):
"""Apply list of augmenters in random order"""
def aug(src):
"""Augumenter body"""
src = [src]
random.shuffle(ts)
for t in ts:
src = [j for i in src for j in t(i)]
return src
return aug
def ColorJitterAug(brightness, contrast, saturation):
"""Apply random brightness, contrast and saturation jitter in random order"""
ts = []
coef = nd.array([[[0.299, 0.587, 0.114]]])
if brightness > 0:
def baug(src):
"""Augumenter body"""
alpha = 1.0 + random.uniform(-brightness, brightness)
src *= alpha
return [src]
ts.append(baug)
if contrast > 0:
def caug(src):
"""Augumenter body"""
alpha = 1.0 + random.uniform(-contrast, contrast)
gray = src*coef
gray = (3.0*(1.0-alpha)/gray.size)*nd.sum(gray)
src *= alpha
src += gray
return [src]
ts.append(caug)
if saturation > 0:
def saug(src):
"""Augumenter body"""
alpha = 1.0 + random.uniform(-saturation, saturation)
gray = src*coef
gray = nd.sum(gray, axis=2, keepdims=True)
gray *= (1.0-alpha)
src *= alpha
src += gray
return [src]
ts.append(saug)
return RandomOrderAug(ts)
def LightingAug(alphastd, eigval, eigvec):
"""Add PCA based noise"""
def aug(src):
"""Augumenter body"""
alpha = np.random.normal(0, alphastd, size=(3,))
rgb = np.dot(eigvec*alpha, eigval)
src += nd.array(rgb)
return [src]
return aug
def ColorNormalizeAug(mean, std):
"""Mean and std normalization"""
mean = nd.array(mean)
std = nd.array(std)
def aug(src):
"""Augumenter body"""
return [color_normalize(src, mean, std)]
return aug
def HorizontalFlipAug(p):
"""Random horizontal flipping"""
def aug(src):
"""Augumenter body"""
if random.random() < p:
src = nd.flip(src, axis=1)
return [src]
return aug
def CastAug():
"""Cast to float32"""
def aug(src):
"""Augumenter body"""
src = src.astype(np.float32)
return [src]
return aug
def CreateAugmenter(data_shape, resize=0, rand_crop=False, rand_resize=False, rand_mirror=False,
mean=None, std=None, brightness=0, contrast=0, saturation=0,
pca_noise=0, inter_method=2):
"""Create augumenter list"""
auglist = []
if resize > 0:
auglist.append(ResizeAug(resize, inter_method))
crop_size = (data_shape[2], data_shape[1])
if rand_resize:
assert rand_crop
auglist.append(RandomSizedCropAug(crop_size, 0.3, (3.0/4.0, 4.0/3.0), inter_method))
elif rand_crop:
auglist.append(RandomCropAug(crop_size, inter_method))
else:
auglist.append(CenterCropAug(crop_size, inter_method))
if rand_mirror:
auglist.append(HorizontalFlipAug(0.5))
auglist.append(CastAug())
if brightness or contrast or saturation:
auglist.append(ColorJitterAug(brightness, contrast, saturation))
if pca_noise > 0:
eigval = np.array([55.46, 4.794, 1.148])
eigvec = np.array([[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203]])
auglist.append(LightingAug(pca_noise, eigval, eigvec))
if mean is True:
mean = np.array([123.68, 116.28, 103.53])
if std is True:
std = np.array([58.395, 57.12, 57.375])
if mean is not None:
assert std is not None
auglist.append(ColorNormalizeAug(mean, std))
return auglist
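# Illustrative usage sketch (added for clarity, not part of the original
# module): a typical training augmentation pipeline for 3x224x224 input.
def _example_create_augmenter():
    augs = CreateAugmenter(data_shape=(3, 224, 224), resize=256, rand_crop=True,
                           rand_mirror=True, mean=True, std=True)
    # resize + random crop + mirror + cast + color normalize
    assert len(augs) == 5
    return augs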
class ImageIter(io.DataIter):
"""Image data iterator with a large number of augumentation choices.
Supports reading from both .rec files and raw image files with image list.
To load from .rec files, please specify path_imgrec. Also specify path_imgidx
to use data partition (for distributed training) or shuffling.
To load from raw image files, specify path_imglist and path_root.
Parameters
----------
batch_size : int
Number of examples per batch
data_shape : tuple
Data shape in (channels, height, width).
For now, only RGB image with 3 channels is supported.
label_width : int
dimension of label
path_imgrec : str
path to image record file (.rec).
Created with tools/im2rec.py or bin/im2rec
path_imglist : str
path to image list (.lst)
Created with tools/im2rec.py or with custom script.
Format: index\t[one or more label separated by \t]\trelative_path_from_root
imglist: list
a list of image with the label(s)
each item is a list [imagelabel: float or list of float, imgpath]
path_root : str
Root folder of image files
path_imgidx : str
Path to image index file. Needed for partition and shuffling when using .rec source.
shuffle : bool
Whether to shuffle all images at the start of each iteration.
Can be slow for HDD.
part_index : int
Partition index
num_parts : int
Total number of partitions.
kwargs : ...
More arguments for creating augumenter. See mx.image.CreateAugmenter
"""
def __init__(self, batch_size, data_shape, label_width=1,
path_imgrec=None, path_imglist=None, path_root=None, path_imgidx=None,
shuffle=False, part_index=0, num_parts=1, aug_list=None, imglist=None, **kwargs):
super(ImageIter, self).__init__()
assert(path_imgrec or path_imglist or (isinstance(imglist, list)))
if path_imgrec:
print('loading recordio...')
if path_imgidx:
self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')
self.imgidx = list(self.imgrec.keys)
else:
self.imgrec = recordio.MXRecordIO(path_imgrec, 'r')
self.imgidx = None
else:
self.imgrec = None
if path_imglist:
print('loading image list...')
with open(path_imglist) as fin:
imglist = {}
imgkeys = []
for line in iter(fin.readline, ''):
line = line.strip().split('\t')
label = nd.array([float(i) for i in line[1:-1]])
key = int(line[0])
imglist[key] = (label, line[-1])
imgkeys.append(key)
self.imglist = imglist
elif isinstance(imglist, list):
print('loading image list...')
result = {}
imgkeys = []
index = 1
for img in imglist:
key = str(index)
index += 1
if isinstance(img[0], numeric_types):
label = nd.array([img[0]])
else:
label = nd.array(img[0])
result[key] = (label, img[1])
imgkeys.append(str(key))
self.imglist = result
else:
self.imglist = None
self.path_root = path_root
assert len(data_shape) == 3 and data_shape[0] == 3
self.provide_data = [('data', (batch_size,) + data_shape)]
if label_width > 1:
self.provide_label = [('softmax_label', (batch_size, label_width))]
else:
self.provide_label = [('softmax_label', (batch_size,))]
self.batch_size = batch_size
self.data_shape = data_shape
self.label_width = label_width
self.shuffle = shuffle
if self.imgrec is None:
self.seq = imgkeys
elif shuffle or num_parts > 1:
assert self.imgidx is not None
self.seq = self.imgidx
else:
self.seq = None
if num_parts > 1:
assert part_index < num_parts
N = len(self.seq)
C = N/num_parts
self.seq = self.seq[part_index*C:(part_index+1)*C]
if aug_list is None:
self.auglist = CreateAugmenter(data_shape, **kwargs)
else:
self.auglist = aug_list
self.cur = 0
self.reset()
def reset(self):
if self.shuffle:
random.shuffle(self.seq)
if self.imgrec is not None:
self.imgrec.reset()
self.cur = 0
def next_sample(self):
"""helper function for reading in next sample"""
if self.seq is not None:
if self.cur >= len(self.seq):
raise StopIteration
idx = self.seq[self.cur]
self.cur += 1
if self.imgrec is not None:
s = self.imgrec.read_idx(idx)
header, img = recordio.unpack(s)
if self.imglist is None:
return header.label, img
else:
return self.imglist[idx][0], img
else:
label, fname = self.imglist[idx]
if self.imgrec is None:
with open(os.path.join(self.path_root, fname), 'rb') as fin:
img = fin.read()
return label, img
else:
s = self.imgrec.read()
if s is None:
raise StopIteration
header, img = recordio.unpack(s)
return header.label, img
def next(self):
batch_size = self.batch_size
c, h, w = self.data_shape
batch_data = nd.empty((batch_size, c, h, w))
batch_label = nd.empty(self.provide_label[0][1])
i = 0
try:
while i < batch_size:
label, s = self.next_sample()
data = [imdecode(s)]
if len(data[0].shape) == 0:
logging.debug('Invalid image, skipping.')
continue
for aug in self.auglist:
data = [ret for src in data for ret in aug(src)]
for d in data:
assert i < batch_size, 'Batch size must be a multiple of the augmenter output length'
batch_data[i][:] = nd.transpose(d, axes=(2, 0, 1))
batch_label[i][:] = label
i += 1
except StopIteration:
if not i:
raise StopIteration
# pad is the number of unfilled slots when the data source runs out mid-batch
return io.DataBatch([batch_data], [batch_label], batch_size - i)
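# A minimal usage sketch (illustrative only: the .rec/.idx paths and the
# augmenter kwargs below are hypothetical, not part of this file):
#
#   train = ImageIter(batch_size=32, data_shape=(3, 112, 112),
#                     path_imgrec='train.rec', path_imgidx='train.idx',
#                     shuffle=True, rand_mirror=True)
#   for batch in train:
#       pass  # batch.data[0]: (32, 3, 112, 112), batch.label[0]: (32,)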
| 33.300439
| 112
| 0.57537
|
166ae17b118a69d45a11a46724665edcf584fc34
| 4,219
|
py
|
Python
|
tests/python/small/test_glm_ipums.py
|
pnijhara/h2o4gpu
|
6257112c134136471420b68241f57190a445b67d
|
[
"Apache-2.0"
] | 458
|
2017-09-20T08:32:10.000Z
|
2022-02-28T18:40:57.000Z
|
tests/python/small/test_glm_ipums.py
|
Jun-NIBS/h2o4gpu
|
9885416deb3285f5d0f33023d6c07373ac4fc0b7
|
[
"Apache-2.0"
] | 461
|
2017-09-20T11:39:04.000Z
|
2021-11-21T15:51:42.000Z
|
tests/python/small/test_glm_ipums.py
|
Jun-NIBS/h2o4gpu
|
9885416deb3285f5d0f33023d6c07373ac4fc0b7
|
[
"Apache-2.0"
] | 114
|
2017-09-20T12:08:07.000Z
|
2021-11-29T14:15:40.000Z
|
# -*- encoding: utf-8 -*-
"""
GLM solver tests using Kaggle datasets.
:copyright: 2017-2018 H2O.ai, Inc.
:license: Apache License Version 2.0 (see LICENSE for details)
"""
import time
import sys
import os
import numpy as np
import pandas as pd
import logging
print(sys.path)
from h2o4gpu.util.testing_utils import find_file, run_glm
logging.basicConfig(level=logging.DEBUG)
def fun(nGPUs=1, nFolds=1, nLambdas=100, nAlphas=8, validFraction=0.2):
name = str(sys._getframe().f_code.co_name)
name = sys._getframe(1).f_code.co_name
t = time.time()
print("cwd: %s" % (os.getcwd()))
sys.stdout.flush()
print("Reading Data")
# from numpy.random import randn
# m=1000
# n=100
# A=randn(m,n)
# x_true=(randn(n)/n)*float64(randn(n)<0.8)
# b=A.dot(x_true)+0.5*randn(m)
# Rscript ipums.R runs glmnet on ipums
#
df = pd.read_csv("./data/ipums_1k.csv")
print(df.shape)
X = np.array(df.iloc[:, :df.shape[1] - 1], dtype='float32', order='C')
y = np.array(df.iloc[:, df.shape[1] - 1], dtype='float32', order='C')
#X = df.iloc[:, :df.shape[1] - 1]
#y = df.iloc[:, df.shape[1] - 1]
t1 = time.time()
# By default tol=1E-2 for h2o4gpu; we don't try to match h2o-3's accuracy exactly, so use a tolerance that reflects how well we expect to do with the default tol for the GLM algorithm.
rmse_train, rmse_test = run_glm(X, y, nGPUs=nGPUs, nlambda=nLambdas, nfolds=nFolds, nalpha=nAlphas,
validFraction=validFraction, verbose=0, name=name, tolerance = 0.34)
# check rmse
print(rmse_train[0, 0])
print(rmse_train[0, 1])
print(rmse_train[0, 2])
print(rmse_test[0, 2])
sys.stdout.flush()
if validFraction==0.0:
assert rmse_train[0, 0] < 54000
assert rmse_train[0, 1] < 54000
assert rmse_train[0, 2] < 54000
assert rmse_test[0, 2] < 54000
else:
if nLambdas>20:
assert rmse_train[0, 0] < 50000
assert rmse_train[0, 1] < 50000
assert rmse_train[0, 2] < 50000
assert rmse_test[0, 2] < 50000
else:
assert rmse_train[0, 0] < 59000
assert rmse_train[0, 1] < 59000
assert rmse_train[0, 2] < 59000
assert rmse_test[0, 2] < 59000
print('\n Total execution time:%d' % (time.time() - t1))
print("TEST PASSED")
sys.stdout.flush()
print("Time taken: {}".format(time.time() - t))
# endfunnel(pipes)
print("DONE.")
sys.stdout.flush()
def test_glm_ipums_gpu_fold1_quick_0(): fun(nGPUs=1, nFolds=1, nLambdas=3, nAlphas=3, validFraction=0)
def test_glm_ipums_gpu_fold1_0(): fun(nGPUs=1, nFolds=1, nLambdas=20, nAlphas=3, validFraction=0)
def test_glm_ipums_gpu_fold5_0(): fun(nGPUs=1, nFolds=5, nLambdas=20, nAlphas=3, validFraction=0)
def test_glm_ipums_gpu_fold1_quick(): fun(nGPUs=1, nFolds=1, nLambdas=5, nAlphas=3, validFraction=0.2)
def test_glm_ipums_gpu_fold1(): fun(nGPUs=1, nFolds=1, nLambdas=20, nAlphas=3, validFraction=0.2)
def test_glm_ipums_gpu_fold5(): fun(nGPUs=1, nFolds=5, nLambdas=20, nAlphas=3, validFraction=0.2)
def test_glm_ipums_gpu2_fold1_quick(): fun(nGPUs=2, nFolds=1, nLambdas=3, nAlphas=3, validFraction=0.2)
def test_glm_ipums_gpu2_fold1(): fun(nGPUs=2, nFolds=1, nLambdas=20, nAlphas=3, validFraction=0.2)
def test_glm_ipums_gpu2_fold5(): fun(nGPUs=3, nFolds=5, nLambdas=20, nAlphas=3, validFraction=0.2)
# def test_glm_ipums_cpu_fold1_quick(): fun(nGPUs=0, nFolds=1, nLambdas=3, nAlphas=3, validFraction=0.2)
#
#
# def test_glm_ipums_cpu_fold1(): fun(nGPUs=0, nFolds=1, nLambdas=20, nAlphas=3, validFraction=0.2)
#
#
# def test_glm_ipums_cpu_fold5(): fun(nGPUs=0, nFolds=5, nLambdas=20, nAlphas=3, validFraction=0.2)
if __name__ == '__main__':
test_glm_ipums_gpu_fold1_quick_0()
test_glm_ipums_gpu_fold1_0()
test_glm_ipums_gpu_fold5_0()
test_glm_ipums_gpu_fold1_quick()
test_glm_ipums_gpu_fold1()
test_glm_ipums_gpu_fold5()
test_glm_ipums_gpu2_fold1_quick()
test_glm_ipums_gpu2_fold1()
test_glm_ipums_gpu2_fold5()
# test_glm_ipums_cpu_fold1_quick()
# test_glm_ipums_cpu_fold1()
# test_glm_ipums_cpu_fold5()
| 30.352518
| 178
| 0.666983
|
319c2468ccc4037debd01f5ab609db0f96c15c14
| 1,331
|
py
|
Python
|
tests/ens/test_get_registry.py
|
DanielVF/web3.py
|
c25be34450c5003d16a937e27c1d5a26187985f1
|
[
"MIT"
] | null | null | null |
tests/ens/test_get_registry.py
|
DanielVF/web3.py
|
c25be34450c5003d16a937e27c1d5a26187985f1
|
[
"MIT"
] | null | null | null |
tests/ens/test_get_registry.py
|
DanielVF/web3.py
|
c25be34450c5003d16a937e27c1d5a26187985f1
|
[
"MIT"
] | null | null | null |
import pytest
from unittest.mock import (
patch,
)
from ens import ENS
from web3 import Web3
def test_resolver_empty(ens):
with patch.object(ens.ens, 'resolver', return_value=None):
assert ens.resolver('') is None
@pytest.mark.parametrize(
'address, expected_reverse',
[
(
'0x1111111111111111111111111111111111111111',
'1111111111111111111111111111111111111111.addr.reverse',
),
(
'0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413',
'bb9bc244d798123fde783fcc1c72d3bb8c189413.addr.reverse',
),
],
)
def test_reverse_domain(address, expected_reverse):
assert ENS.reverse_domain(address) == expected_reverse
@pytest.mark.parametrize(
'label, expected_hash',
[
('eth', '0x4f5b812789fc606be1b3b16908db13fc7a9adf7ca72641f84d75b47069d3d7f0'),
('ETH', '0x4f5b812789fc606be1b3b16908db13fc7a9adf7ca72641f84d75b47069d3d7f0'),
('a.b', ValueError),
],
)
def test_labelhash(ens, label, expected_hash):
if isinstance(expected_hash, type):
with pytest.raises(expected_hash):
ens.labelhash(label)
else:
labelhash = ens.labelhash(label)
assert isinstance(labelhash, bytes)
hash_hex = Web3.toHex(labelhash)
assert hash_hex == expected_hash
| 26.62
| 86
| 0.677686
|
45d8db48d9a1a3869d35a01ee1289d4411ddc079
| 2,174
|
py
|
Python
|
py/g1/bases/g1/bases/datetimes.py
|
clchiou/garage
|
446ff34f86cdbd114b09b643da44988cf5d027a3
|
[
"MIT"
] | 3
|
2016-01-04T06:28:52.000Z
|
2020-09-20T13:18:40.000Z
|
py/g1/bases/g1/bases/datetimes.py
|
clchiou/garage
|
446ff34f86cdbd114b09b643da44988cf5d027a3
|
[
"MIT"
] | null | null | null |
py/g1/bases/g1/bases/datetimes.py
|
clchiou/garage
|
446ff34f86cdbd114b09b643da44988cf5d027a3
|
[
"MIT"
] | null | null | null |
"""Extension of standard library's datetime.
A common pitfall of using datetime to store timestamps is not setting the
timezone to UTC, which then defaults to the local timezone. This
produces wrong results when converting datetime-represented timestamp
to/from a number-represented timestamp. Specifically, when the local
timezone is not UTC, ``datetime.fromtimestamp(0)`` does not return
1970-01-01, and ``datetime(1970, 1, 1).timestamp()`` does not return 0.
All timestamp helpers of this module will set timezone to UTC.
"""
__all__ = [
'UNIX_EPOCH',
'fromisoformat',
'make_timestamp',
'timestamp_date',
'utcfromtimestamp',
'utcnow',
]
import datetime
def fromisoformat(string):
"""Parse a timestamp with datetime.fromisoformat."""
timestamp = datetime.datetime.fromisoformat(string)
if timestamp.tzinfo is None:
return timestamp.replace(tzinfo=datetime.timezone.utc)
else:
return timestamp.astimezone(datetime.timezone.utc)
def make_timestamp(
year, month, day, hour=0, minute=0, second=0, microsecond=0
):
"""Make a datetime-represented timestamp."""
return datetime.datetime(
year,
month,
day,
hour,
minute,
second,
microsecond,
datetime.timezone.utc,
)
def timestamp_date(timestamp):
"""Keep only the date part of a datetime-represented timestamp."""
return datetime.datetime(
year=timestamp.year,
month=timestamp.month,
day=timestamp.day,
tzinfo=datetime.timezone.utc,
)
def utcfromtimestamp(timestamp):
"""Create a ``datetime`` object from timestamp.
Unlike stdlib's ``utcfromtimestamp``, this also sets ``tzinfo`` to
UTC; without this, ``timestamp()`` will return an incorrect number.
"""
return datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc)
def utcnow():
"""Return a ``datetime`` object of now.
Unlike stdlib's ``utcnow``, this also sets ``tzinfo`` to UTC;
without this, ``timestamp()`` will return an incorrect number.
"""
return datetime.datetime.now(datetime.timezone.utc)
UNIX_EPOCH = utcfromtimestamp(0)
| 27.871795
| 76
| 0.688132
|
286dc54d6aac3a474ccde85c06db526d22ae222b
| 1,061
|
py
|
Python
|
koans/about_triangle_project.py
|
shibamirai/python_koans
|
c1eba327dbe9534042e510dfce4e0c49c853255e
|
[
"MIT"
] | null | null | null |
koans/about_triangle_project.py
|
shibamirai/python_koans
|
c1eba327dbe9534042e510dfce4e0c49c853255e
|
[
"MIT"
] | null | null | null |
koans/about_triangle_project.py
|
shibamirai/python_koans
|
c1eba327dbe9534042e510dfce4e0c49c853255e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
# For this exercise, fix the triangle function in triangle.py
# You need to write the triangle method in the file 'triangle.py'
from .triangle import *
class AboutTriangleProject(Koan):
"三角形プロジェクト"
def test_equilateral_triangles_have_equal_sides(self):
"正三角形は、長さの等しい辺を持ちます"
self.assertEqual('equilateral', triangle(2, 2, 2))
self.assertEqual('equilateral', triangle(10, 10, 10))
def test_isosceles_triangles_have_exactly_two_sides_equal(self):
"二等辺三角形は、長さが等しい辺を2つ持ちます"
self.assertEqual('isosceles', triangle(3, 4, 4))
self.assertEqual('isosceles', triangle(4, 3, 4))
self.assertEqual('isosceles', triangle(4, 4, 3))
self.assertEqual('isosceles', triangle(10, 10, 2))
def test_scalene_triangles_have_no_equal_sides(self):
"不等辺三角形は、すべての辺の長さが異なります"
self.assertEqual('scalene', triangle(3, 4, 5))
self.assertEqual('scalene', triangle(10, 11, 12))
self.assertEqual('scalene', triangle(5, 4, 2))
| 35.366667
| 68
| 0.683318
|
667ee1028ad4f21a2d52a2c78280cd9fba1ce378
| 5,561
|
py
|
Python
|
tests/cephfs/cephfs_basic_tests.py
|
anrao19/cephci
|
0ec20acc108750f44a66dcf1c512922a91570f3e
|
[
"MIT"
] | null | null | null |
tests/cephfs/cephfs_basic_tests.py
|
anrao19/cephci
|
0ec20acc108750f44a66dcf1c512922a91570f3e
|
[
"MIT"
] | null | null | null |
tests/cephfs/cephfs_basic_tests.py
|
anrao19/cephci
|
0ec20acc108750f44a66dcf1c512922a91570f3e
|
[
"MIT"
] | null | null | null |
import logging
import random
import string
import traceback
from ceph.ceph import CommandFailed
from tests.cephfs.cephfs_utilsV1 import FsUtils
logger = logging.getLogger(__name__)
log = logger
def run(ceph_cluster, **kw):
try:
fs_util = FsUtils(ceph_cluster)
config = kw.get("config")
build = config.get("build", config.get("rhbuild"))
clients = ceph_cluster.get_ceph_objects("client")
fs_util.prepare_clients(clients, build)
fs_util.auth_list(clients)
mounting_dir = "".join(
random.choice(string.ascii_lowercase + string.digits)
for _ in list(range(10))
)
fuse_mounting_dir = f"/mnt/cephfs_fuse{mounting_dir}/"
fs_util.fuse_mount(clients, fuse_mounting_dir)
mount_test_case(clients, fuse_mounting_dir)
kernel_mounting_dir = f"/mnt/cephfs_kernel{mounting_dir}/"
mon_node_ips = fs_util.get_mon_node_ips()
fs_util.kernel_mount(clients, kernel_mounting_dir, ",".join(mon_node_ips))
mount_test_case(clients, kernel_mounting_dir)
log.info("Cleaning up!-----")
rc = fs_util.client_clean_up(
[],
clients,
kernel_mounting_dir,
"umount",
)
if rc != 0:
raise CommandFailed("fuse clients cleanup failed")
log.info("Fuse clients cleaned up successfully")
rc = fs_util.client_clean_up(
clients,
[],
fuse_mounting_dir,
"umount",
)
if rc != 0:
raise CommandFailed("kernel clients cleanup failed")
log.info("kernel clients cleaned up successfully")
return 0
except Exception as e:
log.info(e)
log.info(traceback.format_exc())
return 1
def mount_test_case(clients, mounting_dir):
try:
tc1 = "11293"
tc2 = "11296"
tc3 = "11297"
tc4 = "11295"
dir1 = "".join(
random.choice(string.ascii_lowercase + string.digits) for _ in range(10)
)
dir2 = "".join(
random.choice(string.ascii_lowercase + string.digits) for _ in range(10)
)
dir3 = "".join(
random.choice(string.ascii_lowercase + string.digits) for _ in range(10)
)
results = []
return_counts = []
log.info("Create files and directories of 1000 depth and 1000 breadth")
for client in clients:
client.exec_command(
cmd=f"sudo mkdir -p {mounting_dir}{dir1} {mounting_dir}{dir2} {mounting_dir}{dir3}"
)
log.info(f"Execution of testcase {tc1} started")
out, rc = client.exec_command(
sudo=True,
cmd=f"python3 /home/cephuser/smallfile/smallfile_cli.py --operation create --threads 10 --file-size 4 "
f"--files 1000 --files-per-dir 10 --dirs-per-dir 2 --top "
f"{mounting_dir}{dir1}",
long_running=True,
)
log.info(f"Execution of testcase {tc1} ended")
results.append(f"TC {tc1} passed")
log.info(f"Execution of testcase {tc2} started")
client.exec_command(
cmd=f"sudo cp -r {mounting_dir}{dir1}/* {mounting_dir}{dir2}/"
)
client.exec_command(
cmd=f"diff -qr {mounting_dir}{dir1} {mounting_dir}{dir2}/"
)
log.info(f"Execution of testcase {tc2} ended")
results.append(f"TC {tc2} passed")
log.info(f"Execution of testcase {tc3} started")
client.exec_command(
cmd=f"sudo mv -t {mounting_dir}{dir1}/* {mounting_dir}{dir2}/"
)
log.info(f"Execution of testcase {tc3} ended")
results.append(f"TC {tc3} passed")
log.info(f"Execution of testcase {tc4} started")
for client in clients:
if client.pkg_type != "deb":
client.exec_command(
cmd=f"sudo dd if=/dev/zero of={mounting_dir}{client.node.hostname}.txt bs=100M "
"count=5"
)
out1, rc1 = client.exec_command(
cmd=f"sudo ls -c -ltd -- {mounting_dir}{client.node.hostname}.*"
)
client.exec_command(
cmd=f"sudo dd if=/dev/zero of={mounting_dir}{client.node.hostname}.txt bs=200M "
"count=5"
)
out2, rc2 = client.exec_command(
cmd=f"sudo ls -c -ltd -- {mounting_dir}{client.node.hostname}.*"
)
a = out1.read().decode()
b = out2.read().decode()
if a != b:
return_counts.append(out1.channel.recv_exit_status())
return_counts.append(out2.channel.recv_exit_status())
else:
raise CommandFailed("Metadata info command failed")
break
log.info(f"Execution of testcase {tc4} ended")
log.info(return_counts)
rc_set = set(return_counts)
if len(rc_set) == 1:
results.append(f"TC {tc4} passed")
log.info("Testcase Results:")
for res in results:
log.info(res)
break
except CommandFailed as e:
log.info(e)
log.info(traceback.format_exc())
| 37.073333
| 119
| 0.539112
|
7730d329469516456bde3b089f06bd36ccc0d19f
| 66,320
|
py
|
Python
|
src/oic/oic/__init__.py
|
Swapnilr1/pyoidc
|
2feb099f4f4e26047888437aea1226237d05ebba
|
[
"Apache-2.0"
] | null | null | null |
src/oic/oic/__init__.py
|
Swapnilr1/pyoidc
|
2feb099f4f4e26047888437aea1226237d05ebba
|
[
"Apache-2.0"
] | null | null | null |
src/oic/oic/__init__.py
|
Swapnilr1/pyoidc
|
2feb099f4f4e26047888437aea1226237d05ebba
|
[
"Apache-2.0"
] | null | null | null |
import hashlib
import logging
import os
from oic.utils.http_util import Response
try:
from json import JSONDecodeError
except ImportError: # Only works for >= 3.5
_decode_err = ValueError
else:
_decode_err = JSONDecodeError
import six
from future.backports.urllib.parse import urlparse
from future.moves.urllib.parse import parse_qs
from jwkest.jwe import JWE
from jwkest import jws, as_bytes
from jwkest import jwe
from requests import ConnectionError
from oic import oauth2, OIDCONF_PATTERN
from oic import rndstr
from oic.exception import AuthzError
from oic.exception import AuthnToOld
from oic.exception import ParameterError
from oic.exception import SubMismatch
from oic.oauth2 import HTTP_ARGS
from oic.oauth2 import authz_error
from oic.oauth2.consumer import ConfigurationError
from oic.oauth2.exception import OtherError
from oic.oauth2.exception import ParseError
from oic.oauth2.exception import MissingRequiredAttribute
from oic.oauth2.message import ErrorResponse
from oic.oauth2.message import Message
from oic.oauth2.util import get_or_post
from oic.oic.message import ClaimsRequest
from oic.oic.message import IdToken
from oic.oic.message import JasonWebToken
from oic.oic.message import SCOPE2CLAIMS
from oic.oic.message import RegistrationResponse
from oic.oic.message import AuthorizationResponse
from oic.oic.message import AccessTokenResponse
from oic.oic.message import Claims
from oic.oic.message import AccessTokenRequest
from oic.oic.message import RefreshAccessTokenRequest
from oic.oic.message import UserInfoRequest
from oic.oic.message import AuthorizationRequest
from oic.oic.message import OpenIDRequest
from oic.oic.message import RegistrationRequest
from oic.oic.message import RefreshSessionRequest
from oic.oic.message import CheckSessionRequest
from oic.oic.message import CheckIDRequest
from oic.oic.message import EndSessionRequest
from oic.oic.message import OpenIDSchema
from oic.oic.message import ProviderConfigurationResponse
from oic.oic.message import TokenErrorResponse
from oic.oic.message import ClientRegistrationErrorResponse
from oic.oic.message import UserInfoErrorResponse
from oic.oic.message import AuthorizationErrorResponse
from oic.exception import AccessDenied
from oic.exception import CommunicationError
from oic.exception import IssuerMismatch
from oic.exception import MissingParameter
from oic.exception import PyoidcError
from oic.exception import RegistrationError
from oic.exception import RequestError
from oic.utils import time_util
from oic.utils.keyio import KeyJar
from oic.utils.sanitize import sanitize
from oic.utils.webfinger import OIC_ISSUER
from oic.utils.webfinger import WebFinger
__author__ = 'rohe0002'
logger = logging.getLogger(__name__)
ENDPOINTS = ["authorization_endpoint", "token_endpoint",
"userinfo_endpoint", "refresh_session_endpoint",
"end_session_endpoint", "registration_endpoint",
"check_id_endpoint"]
RESPONSE2ERROR = {
"AuthorizationResponse": [AuthorizationErrorResponse,
TokenErrorResponse],
"AccessTokenResponse": [TokenErrorResponse],
"IdToken": [ErrorResponse],
"RegistrationResponse": [ClientRegistrationErrorResponse],
"OpenIDSchema": [UserInfoErrorResponse]
}
REQUEST2ENDPOINT = {
"AuthorizationRequest": "authorization_endpoint",
"OpenIDRequest": "authorization_endpoint",
"AccessTokenRequest": "token_endpoint",
"RefreshAccessTokenRequest": "token_endpoint",
"UserInfoRequest": "userinfo_endpoint",
"CheckSessionRequest": "check_session_endpoint",
"CheckIDRequest": "check_id_endpoint",
"EndSessionRequest": "end_session_endpoint",
"RefreshSessionRequest": "refresh_session_endpoint",
"RegistrationRequest": "registration_endpoint",
"RotateSecret": "registration_endpoint",
# ---
"ResourceRequest": "resource_endpoint",
'TokenIntrospectionRequest': 'introspection_endpoint',
'TokenRevocationRequest': 'revocation_endpoint',
"ROPCAccessTokenRequest": "token_endpoint",
}
# -----------------------------------------------------------------------------
JWT_BEARER = "urn:ietf:params:oauth:client-assertion-type:jwt-bearer"
SAML2_BEARER_GRANT_TYPE = "urn:ietf:params:oauth:grant-type:saml2-bearer"
# This should probably be part of the configuration
MAX_AUTHENTICATION_AGE = 86400
DEF_SIGN_ALG = {"id_token": "RS256",
"openid_request_object": "RS256",
"client_secret_jwt": "HS256",
"private_key_jwt": "RS256"}
# -----------------------------------------------------------------------------
ACR_LISTS = [
["0", "1", "2", "3", "4"],
]
def verify_acr_level(req, level):
if req is None:
return level
elif "values" in req:
for _r in req["values"]:
for alist in ACR_LISTS:
try:
if alist.index(_r) <= alist.index(level):
return level
except ValueError:
pass
else: # Required or Optional
return level
raise AccessDenied("", req)
def deser_id_token(inst, txt=""):
if not txt:
return None
else:
return IdToken().from_jwt(txt, keyjar=inst.keyjar)
# -----------------------------------------------------------------------------
def make_openid_request(arq, keys=None, userinfo_claims=None,
idtoken_claims=None, request_object_signing_alg=None,
**kwargs):
"""
Construct the specification of what I want returned.
The request will be signed
:param arq: The Authorization request
:param keys: Keys to use for signing/encrypting
:param userinfo_claims: UserInfo claims
:param idtoken_claims: IdToken claims
:param request_object_signing_alg: Which signing algorithm to use
:return: JWT encoded OpenID request
"""
oir_args = {}
for prop in OpenIDRequest.c_param.keys():
try:
oir_args[prop] = arq[prop]
except KeyError:
pass
for attr in ["scope", "response_type"]:
if attr in oir_args:
oir_args[attr] = " ".join(oir_args[attr])
c_args = {}
if userinfo_claims is not None:
# UserInfoClaims
c_args["userinfo"] = Claims(**userinfo_claims)
if idtoken_claims is not None:
# IdTokenClaims
c_args["id_token"] = Claims(**idtoken_claims)
if c_args:
oir_args["claims"] = ClaimsRequest(**c_args)
oir = OpenIDRequest(**oir_args)
return oir.to_jwt(key=keys, algorithm=request_object_signing_alg)
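# Illustrative call of make_openid_request (the request arguments and key list
# below are hypothetical, not taken from this module):
#
#   areq = AuthorizationRequest(response_type="code", client_id="client_1",
#                               redirect_uri="https://rp.example.com/cb",
#                               scope=["openid"], state="state0", nonce="nonce0")
#   signed_request = make_openid_request(
#       areq, keys=signing_keys, request_object_signing_alg="RS256")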
class Token(oauth2.Token):
pass
class Grant(oauth2.Grant):
_authz_resp = AuthorizationResponse
_acc_resp = AccessTokenResponse
_token_class = Token
def add_token(self, resp):
tok = self._token_class(resp)
if tok.access_token:
self.tokens.append(tok)
else:
_tmp = getattr(tok, "id_token", None)
if _tmp:
self.tokens.append(tok)
PREFERENCE2PROVIDER = {
# "require_signed_request_object": "request_object_algs_supported",
"request_object_signing_alg": "request_object_signing_alg_values_supported",
"request_object_encryption_alg":
"request_object_encryption_alg_values_supported",
"request_object_encryption_enc":
"request_object_encryption_enc_values_supported",
"userinfo_signed_response_alg": "userinfo_signing_alg_values_supported",
"userinfo_encrypted_response_alg":
"userinfo_encryption_alg_values_supported",
"userinfo_encrypted_response_enc":
"userinfo_encryption_enc_values_supported",
"id_token_signed_response_alg": "id_token_signing_alg_values_supported",
"id_token_encrypted_response_alg":
"id_token_encryption_alg_values_supported",
"id_token_encrypted_response_enc":
"id_token_encryption_enc_values_supported",
"default_acr_values": "acr_values_supported",
"subject_type": "subject_types_supported",
"token_endpoint_auth_method": "token_endpoint_auth_methods_supported",
"token_endpoint_auth_signing_alg":
"token_endpoint_auth_signing_alg_values_supported",
"response_types": "response_types_supported",
'grant_types': 'grant_types_supported'
}
PROVIDER2PREFERENCE = dict([(v, k) for k, v in PREFERENCE2PROVIDER.items()])
PROVIDER_DEFAULT = {
"token_endpoint_auth_method": "client_secret_basic",
"id_token_signed_response_alg": "RS256",
}
PARAMMAP = {
"sign": "%s_signed_response_alg",
"alg": "%s_encrypted_response_alg",
"enc": "%s_encrypted_response_enc",
}
def claims_match(value, claimspec):
"""
Implements matching according to section 5.5.1 of
http://openid.net/specs/openid-connect-core-1_0.html
The lack of value is not checked here.
Also the text doesn't prohibit having both 'value' and 'values'.
:param value: single value or list of values
:param claimspec: None or dictionary with 'essential', 'value' or 'values'
as key
:return: Boolean
"""
if claimspec is None: # match anything
return True
matched = False
for key, val in claimspec.items():
if key == "value":
if value == val:
matched = True
elif key == "values":
if value in val:
matched = True
elif key == 'essential':
# Whether it's essential or not doesn't change anything here
continue
if matched:
break
if matched is False:
if list(claimspec.keys()) == ['essential']:
return True
return matched
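# Illustrative results of claims_match per the rules above (not part of the
# original module):
#
#   claims_match("2", {"values": ["1", "2"]})   # True: value is in 'values'
#   claims_match("S256", {"value": "S256"})     # True: exact 'value' match
#   claims_match("0", {"essential": True})      # True: only 'essential' specified
#   claims_match("0", {"value": "1"})           # False: no match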
class Client(oauth2.Client):
_endpoints = ENDPOINTS
def __init__(self, client_id=None,
client_prefs=None, client_authn_method=None, keyjar=None,
verify_ssl=True, config=None, client_cert=None,
requests_dir='requests'):
oauth2.Client.__init__(self, client_id,
client_authn_method=client_authn_method,
keyjar=keyjar, verify_ssl=verify_ssl,
config=config, client_cert=client_cert)
self.file_store = "./file/"
self.file_uri = "http://localhost/"
self.base_url = ''
# OpenID connect specific endpoints
for endpoint in ENDPOINTS:
setattr(self, endpoint, "")
self.id_token = None
self.log = None
self.request2endpoint = REQUEST2ENDPOINT
self.response2error = RESPONSE2ERROR
self.grant_class = Grant
self.token_class = Token
self.provider_info = Message()
self.registration_response = {}
self.client_prefs = client_prefs or {}
self.behaviour = {}
self.wf = WebFinger(OIC_ISSUER)
self.wf.httpd = self
self.allow = {}
self.post_logout_redirect_uris = []
self.registration_expires = 0
self.registration_access_token = None
self.id_token_max_age = 0
# Default key by kid for different key types
# For instance {'sig': {"RSA":"abc"}}
self.kid = {"sig": {}, "enc": {}}
self.requests_dir = requests_dir
def _get_id_token(self, **kwargs):
try:
return kwargs["id_token"]
except KeyError:
grant = self.get_grant(**kwargs)
if grant:
try:
_scope = kwargs["scope"]
except KeyError:
_scope = None
for token in grant.tokens:
if token.scope and _scope:
flag = True
for item in _scope:
if item not in token.scope:
flag = False
break
if not flag:
break
if token.id_token:
return token.id_token
return None
def request_object_encryption(self, msg, **kwargs):
try:
encalg = kwargs["request_object_encryption_alg"]
except KeyError:
try:
encalg = self.behaviour["request_object_encryption_alg"]
except KeyError:
return msg
try:
encenc = kwargs["request_object_encryption_enc"]
except KeyError:
try:
encenc = self.behaviour["request_object_encryption_enc"]
except KeyError:
raise MissingRequiredAttribute(
"No request_object_encryption_enc specified")
_jwe = JWE(msg, alg=encalg, enc=encenc)
_kty = jwe.alg2keytype(encalg)
try:
_kid = kwargs["enc_kid"]
except KeyError:
_kid = ""
if "target" not in kwargs:
raise MissingRequiredAttribute("No target specified")
if _kid:
_keys = self.keyjar.get_encrypt_key(_kty, owner=kwargs["target"],
kid=_kid)
_jwe["kid"] = _kid
else:
_keys = self.keyjar.get_encrypt_key(_kty, owner=kwargs["target"])
return _jwe.encrypt(_keys)
@staticmethod
def construct_redirect_uri(**kwargs):
_filedir = kwargs["local_dir"]
if not os.path.isdir(_filedir):
os.makedirs(_filedir)
_webpath = kwargs["base_path"]
_name = rndstr(10) + ".jwt"
filename = os.path.join(_filedir, _name)
while os.path.exists(filename):
_name = rndstr(10)
filename = os.path.join(_filedir, _name)
_webname = "%s%s" % (_webpath, _name)
return filename, _webname
def filename_from_webname(self, webname):
_filedir = self.requests_dir
if not os.path.isdir(_filedir):
os.makedirs(_filedir)
assert webname.startswith(self.base_url)
return webname[len(self.base_url):]
def construct_AuthorizationRequest(self, request=AuthorizationRequest,
request_args=None, extra_args=None,
request_param=None, **kwargs):
if request_args is not None:
# if "claims" in request_args:
# kwargs["claims"] = request_args["claims"]
# del request_args["claims"]
if "nonce" not in request_args:
_rt = request_args["response_type"]
if "token" in _rt or "id_token" in _rt:
request_args["nonce"] = rndstr(32)
elif "response_type" in kwargs:
if "token" in kwargs["response_type"]:
request_args = {"nonce": rndstr(32)}
else: # Never wrong to specify a nonce
request_args = {"nonce": rndstr(32)}
if "request_method" in kwargs:
if kwargs["request_method"] == "file":
request_param = "request_uri"
else:
request_param = "request"
del kwargs["request_method"]
areq = oauth2.Client.construct_AuthorizationRequest(self, request,
request_args,
extra_args,
**kwargs)
if request_param:
alg = None
for arg in ["request_object_signing_alg", "algorithm"]:
try: # Trumps everything
alg = kwargs[arg]
except KeyError:
pass
else:
break
if not alg:
try:
alg = self.behaviour["request_object_signing_alg"]
except KeyError:
alg = "none"
kwargs["request_object_signing_alg"] = alg
if "keys" not in kwargs and alg and alg != "none":
_kty = jws.alg2keytype(alg)
try:
_kid = kwargs["sig_kid"]
except KeyError:
_kid = self.kid["sig"].get(_kty, None)
kwargs["keys"] = self.keyjar.get_signing_key(_kty, kid=_kid)
_req = make_openid_request(areq, **kwargs)
# Should the request be encrypted
_req = self.request_object_encryption(_req, **kwargs)
if request_param == "request":
areq["request"] = _req
else:
try:
_webname = self.registration_response['request_uris'][0]
filename = self.filename_from_webname(_webname)
except KeyError:
filename, _webname = self.construct_redirect_uri(**kwargs)
fid = open(filename, mode="w")
fid.write(_req)
fid.close()
areq["request_uri"] = _webname
return areq
def construct_AccessTokenRequest(self, request=AccessTokenRequest,
request_args=None, extra_args=None,
**kwargs):
return oauth2.Client.construct_AccessTokenRequest(self, request,
request_args,
extra_args, **kwargs)
def construct_RefreshAccessTokenRequest(self,
request=RefreshAccessTokenRequest,
request_args=None, extra_args=None,
**kwargs):
return oauth2.Client.construct_RefreshAccessTokenRequest(self, request,
request_args,
extra_args,
**kwargs)
def construct_UserInfoRequest(self, request=UserInfoRequest,
request_args=None, extra_args=None,
**kwargs):
if request_args is None:
request_args = {}
if "access_token" in request_args:
pass
else:
if "scope" not in kwargs:
kwargs["scope"] = "openid"
token = self.get_token(**kwargs)
if token is None:
raise MissingParameter("No valid token available")
request_args["access_token"] = token.access_token
return self.construct_request(request, request_args, extra_args)
def construct_RegistrationRequest(self, request=RegistrationRequest,
request_args=None, extra_args=None,
**kwargs):
return self.construct_request(request, request_args, extra_args)
def construct_RefreshSessionRequest(self,
request=RefreshSessionRequest,
request_args=None, extra_args=None,
**kwargs):
return self.construct_request(request, request_args, extra_args)
def _id_token_based(self, request, request_args=None, extra_args=None,
**kwargs):
if request_args is None:
request_args = {}
try:
_prop = kwargs["prop"]
except KeyError:
_prop = "id_token"
if _prop in request_args:
pass
else:
id_token = self._get_id_token(**kwargs)
if id_token is None:
raise MissingParameter("No valid id token available")
request_args[_prop] = id_token
return self.construct_request(request, request_args, extra_args)
def construct_CheckSessionRequest(self, request=CheckSessionRequest,
request_args=None, extra_args=None,
**kwargs):
return self._id_token_based(request, request_args, extra_args, **kwargs)
def construct_CheckIDRequest(self, request=CheckIDRequest,
request_args=None,
extra_args=None, **kwargs):
# access_token is where the id_token will be placed
return self._id_token_based(request, request_args, extra_args,
prop="access_token", **kwargs)
def construct_EndSessionRequest(self, request=EndSessionRequest,
request_args=None, extra_args=None,
**kwargs):
if request_args is None:
request_args = {}
if "state" in kwargs:
request_args["state"] = kwargs["state"]
elif "state" in request_args:
kwargs["state"] = request_args["state"]
# if "redirect_url" not in request_args:
# request_args["redirect_url"] = self.redirect_url
return self._id_token_based(request, request_args, extra_args,
**kwargs)
# ------------------------------------------------------------------------
def authorization_request_info(self, request_args=None, extra_args=None,
**kwargs):
return self.request_info(AuthorizationRequest, "GET",
request_args, extra_args, **kwargs)
# ------------------------------------------------------------------------
def do_authorization_request(self, request=AuthorizationRequest,
state="", body_type="", method="GET",
request_args=None, extra_args=None,
http_args=None,
response_cls=AuthorizationResponse):
algs = self.sign_enc_algs("id_token")
if 'code_challenge' in self.config:
_args, code_verifier = self.add_code_challenge()
request_args.update(_args)
return oauth2.Client.do_authorization_request(self, request, state,
body_type, method,
request_args,
extra_args, http_args,
response_cls,
algs=algs)
def do_access_token_request(self, request=AccessTokenRequest,
scope="", state="", body_type="json",
method="POST", request_args=None,
extra_args=None, http_args=None,
response_cls=AccessTokenResponse,
authn_method="client_secret_basic", **kwargs):
atr = oauth2.Client.do_access_token_request(self, request, scope,
state, body_type, method,
request_args, extra_args,
http_args, response_cls,
authn_method, **kwargs)
try:
_idt = atr['id_token']
except KeyError:
pass
else:
try:
if self.state2nonce[state] != _idt['nonce']:
raise ParameterError('Someone has messed with "nonce"')
except KeyError:
pass
return atr
def do_access_token_refresh(self, request=RefreshAccessTokenRequest,
state="", body_type="json", method="POST",
request_args=None, extra_args=None,
http_args=None,
response_cls=AccessTokenResponse,
**kwargs):
return oauth2.Client.do_access_token_refresh(self, request, state,
body_type, method,
request_args,
extra_args, http_args,
response_cls, **kwargs)
def do_registration_request(self, request=RegistrationRequest,
scope="", state="", body_type="json",
method="POST", request_args=None,
extra_args=None, http_args=None,
response_cls=None):
url, body, ht_args, csi = self.request_info(request, method=method,
request_args=request_args,
extra_args=extra_args,
scope=scope, state=state)
if http_args is None:
http_args = ht_args
else:
http_args.update(http_args)
if response_cls is None:
response_cls = RegistrationResponse
response = self.request_and_return(url, response_cls, method, body,
body_type, state=state,
http_args=http_args)
return response
def do_check_session_request(self, request=CheckSessionRequest,
scope="",
state="", body_type="json", method="GET",
request_args=None, extra_args=None,
http_args=None,
response_cls=IdToken):
url, body, ht_args, csi = self.request_info(request, method=method,
request_args=request_args,
extra_args=extra_args,
scope=scope, state=state)
if http_args is None:
http_args = ht_args
else:
http_args.update(http_args)
return self.request_and_return(url, response_cls, method, body,
body_type, state=state,
http_args=http_args)
def do_check_id_request(self, request=CheckIDRequest, scope="",
state="", body_type="json", method="GET",
request_args=None, extra_args=None,
http_args=None,
response_cls=IdToken):
url, body, ht_args, csi = self.request_info(request, method=method,
request_args=request_args,
extra_args=extra_args,
scope=scope, state=state)
if http_args is None:
http_args = ht_args
else:
http_args.update(http_args)
return self.request_and_return(url, response_cls, method, body,
body_type, state=state,
http_args=http_args)
def do_end_session_request(self, request=EndSessionRequest, scope="",
state="", body_type="", method="GET",
request_args=None, extra_args=None,
http_args=None, response_cls=None):
url, body, ht_args, csi = self.request_info(request, method=method,
request_args=request_args,
extra_args=extra_args,
scope=scope, state=state)
if http_args is None:
http_args = ht_args
else:
http_args.update(http_args)
return self.request_and_return(url, response_cls, method, body,
body_type, state=state,
http_args=http_args)
def user_info_request(self, method="GET", state="", scope="", **kwargs):
uir = UserInfoRequest()
logger.debug("[user_info_request]: kwargs:%s" % (sanitize(kwargs),))
token = None
if "token" in kwargs:
if kwargs["token"]:
uir["access_token"] = kwargs["token"]
token = Token()
token.token_type = "Bearer"
token.access_token = kwargs["token"]
kwargs["behavior"] = "use_authorization_header"
else:
# What to do ? Need a callback
pass
elif "access_token" in kwargs and kwargs["access_token"]:
uir["access_token"] = kwargs["access_token"]
del kwargs["access_token"]
elif state:
token = self.grant[state].get_token(scope)
if token.is_valid():
uir["access_token"] = token.access_token
if token.token_type and token.token_type.lower() == "bearer" \
and method == "GET":
kwargs["behavior"] = "use_authorization_header"
else:
# raise oauth2.OldAccessToken
if self.log:
self.log.info("do access token refresh")
try:
self.do_access_token_refresh(token=token)
token = self.grant[state].get_token(scope)
uir["access_token"] = token.access_token
except Exception:
raise
uri = self._endpoint("userinfo_endpoint", **kwargs)
# If access token is a bearer token it might be sent in the
# authorization header
# 4 ways of sending the access_token:
# - POST with token in authorization header
# - POST with token in message body
# - GET with token in authorization header
# - GET with token as query parameter
if "behavior" in kwargs:
_behav = kwargs["behavior"]
_token = uir["access_token"]
_ttype = ''
try:
_ttype = kwargs["token_type"]
except KeyError:
if token:
try:
_ttype = token.token_type
except AttributeError:
raise MissingParameter("Unspecified token type")
if 'as_query_parameter' == _behav:
method = 'GET'
elif token:
# use_authorization_header, token_in_message_body
if "use_authorization_header" in _behav:
token_header = "{type} {token}".format(
type=_ttype.capitalize(),
token=_token)
if "headers" in kwargs:
kwargs["headers"].update(
{"Authorization": token_header})
else:
kwargs["headers"] = {"Authorization": token_header}
if "token_in_message_body" not in _behav:
# remove the token from the request
del uir["access_token"]
path, body, kwargs = get_or_post(uri, method, uir, **kwargs)
h_args = dict([(k, v) for k, v in kwargs.items() if k in HTTP_ARGS])
return path, body, method, h_args
def do_user_info_request(self, method="POST", state="", scope="openid",
request="openid", **kwargs):
kwargs["request"] = request
path, body, method, h_args = self.user_info_request(method, state,
scope, **kwargs)
logger.debug("[do_user_info_request] PATH:%s BODY:%s H_ARGS: %s" % (
sanitize(path), sanitize(body), sanitize(h_args)))
if self.events:
self.events.store('Request', {'body': body})
self.events.store('request_url', path)
self.events.store('request_http_args', h_args)
try:
resp = self.http_request(path, method, data=body, **h_args)
except oauth2.exception.MissingRequiredAttribute:
raise
if resp.status_code == 200:
if "application/json" in resp.headers["content-type"]:
sformat = "json"
else:
assert "application/jwt" in resp.headers["content-type"]
sformat = "jwt"
elif resp.status_code == 500:
raise PyoidcError("ERROR: Something went wrong: %s" % resp.text)
elif 400 <= resp.status_code < 500:
# the response text might be a OIDC message
try:
res = ErrorResponse().from_json(resp.text)
except Exception:
raise RequestError(resp.text)
else:
self.store_response(res, resp.text)
return res
else:
raise PyoidcError("ERROR: Something went wrong [%s]: %s" % (
resp.status_code, resp.text))
try:
_schema = kwargs["user_info_schema"]
except KeyError:
_schema = OpenIDSchema
logger.debug("Reponse text: '%s'" % sanitize(resp.text))
_txt = resp.text
if sformat == "json":
res = _schema().from_json(txt=_txt)
else:
res = _schema().from_jwt(_txt, keyjar=self.keyjar,
sender=self.provider_info["issuer"])
if 'error' in res: # Error response
res = UserInfoErrorResponse(**res.to_dict())
if state:
# Verify userinfo sub claim against what's returned in the ID Token
idt = self.grant[state].get_id_token()
if idt:
if idt['sub'] != res['sub']:
raise SubMismatch(
'Sub identifier not the same in userinfo and Id Token')
self.store_response(res, _txt)
return res
def get_userinfo_claims(self, access_token, endpoint, method="POST",
schema_class=OpenIDSchema, **kwargs):
uir = UserInfoRequest(access_token=access_token)
h_args = dict([(k, v) for k, v in kwargs.items() if k in HTTP_ARGS])
if "authn_method" in kwargs:
http_args = self.init_authentication_method(**kwargs)
else:
# If nothing defined this is the default
http_args = self.init_authentication_method(uir, "bearer_header",
**kwargs)
h_args.update(http_args)
path, body, kwargs = get_or_post(endpoint, method, uir, **kwargs)
try:
resp = self.http_request(path, method, data=body, **h_args)
except MissingRequiredAttribute:
raise
if resp.status_code == 200:
assert "application/json" in resp.headers["content-type"]
elif resp.status_code == 500:
raise PyoidcError("ERROR: Something went wrong: %s" % resp.text)
else:
raise PyoidcError(
"ERROR: Something went wrong [%s]: %s" % (resp.status_code,
resp.text))
res = schema_class().from_json(txt=resp.text)
self.store_response(res, resp.txt)
return res
def handle_provider_config(self, pcr, issuer, keys=True, endpoints=True):
"""
Deal with Provider Config Response
:param pcr: The ProviderConfigResponse instance
:param issuer: The one I thought should be the issuer of the config
:param keys: Should I deal with keys
:param endpoints: Should I deal with endpoints, that is store them
as attributes in self.
"""
if "issuer" in pcr:
_pcr_issuer = pcr["issuer"]
if pcr["issuer"].endswith("/"):
if issuer.endswith("/"):
_issuer = issuer
else:
_issuer = issuer + "/"
else:
if issuer.endswith("/"):
_issuer = issuer[:-1]
else:
_issuer = issuer
try:
self.allow["issuer_mismatch"]
except KeyError:
if _issuer != _pcr_issuer:
raise IssuerMismatch("'%s' != '%s'" % (_issuer, _pcr_issuer), pcr)
self.provider_info = pcr
else:
_pcr_issuer = issuer
if endpoints:
for key, val in pcr.items():
if key.endswith("_endpoint"):
setattr(self, key, val)
if keys:
if self.keyjar is None:
self.keyjar = KeyJar(verify_ssl=self.verify_ssl)
self.keyjar.load_keys(pcr, _pcr_issuer)
def provider_config(self, issuer, keys=True, endpoints=True,
response_cls=ProviderConfigurationResponse,
serv_pattern=OIDCONF_PATTERN):
if issuer.endswith("/"):
_issuer = issuer[:-1]
else:
_issuer = issuer
url = serv_pattern % _issuer
pcr = None
r = self.http_request(url, allow_redirects=True)
if r.status_code == 200:
try:
pcr = response_cls().from_json(r.text)
except Exception:
# FIXME: This should catch specific exception from `from_json()`
_err_txt = "Faulty provider config response: {}".format(r.text)
logger.error(sanitize(_err_txt))
raise ParseError(_err_txt)
# elif r.status_code == 302 or r.status_code == 301:
# while r.status_code == 302 or r.status_code == 301:
# redirect_header = r.headers["location"]
# if not urlparse(redirect_header).scheme:
# # Relative URL was provided - construct new redirect
# # using an issuer
# _split = urlparse(issuer)
# new_url = urlunparse((_split.scheme, _split.netloc,
# as_unicode(redirect_header),
# _split.params,
# _split.query, _split.fragment))
# r = self.http_request(new_url)
# if r.status_code == 200:
# pcr = response_cls().from_json(r.text)
# break
# logger.debug("Provider info: %s" % sanitize(pcr))
if pcr is None:
raise CommunicationError(
"Trying '%s', status %s" % (url, r.status_code))
self.store_response(pcr, r.text)
self.handle_provider_config(pcr, issuer, keys, endpoints)
return pcr
def unpack_aggregated_claims(self, userinfo):
if userinfo["_claim_sources"]:
for csrc, spec in userinfo["_claim_sources"].items():
if "JWT" in spec:
aggregated_claims = Message().from_jwt(
spec["JWT"].encode("utf-8"),
keyjar=self.keyjar, sender=csrc)
claims = [value for value, src in
userinfo["_claim_names"].items() if src == csrc]
if set(claims) != set(list(aggregated_claims.keys())):
logger.warning(
"Claims from claim source doesn't match what's in "
"the userinfo")
for key, vals in aggregated_claims.items():
userinfo[key] = vals
return userinfo
def fetch_distributed_claims(self, userinfo, callback=None):
for csrc, spec in userinfo["_claim_sources"].items():
if "endpoint" in spec:
if "access_token" in spec:
_uinfo = self.do_user_info_request(
method='GET', token=spec["access_token"],
userinfo_endpoint=spec["endpoint"])
else:
if callback:
_uinfo = self.do_user_info_request(
method='GET', token=callback(spec['endpoint']),
userinfo_endpoint=spec["endpoint"])
else:
_uinfo = self.do_user_info_request(
method='GET', userinfo_endpoint=spec["endpoint"])
claims = [value for value, src in
userinfo["_claim_names"].items() if src == csrc]
if set(claims) != set(list(_uinfo.keys())):
logger.warning(
"Claims from claim source doesn't match what's in "
"the userinfo")
for key, vals in _uinfo.items():
userinfo[key] = vals
return userinfo
def verify_alg_support(self, alg, usage, other):
"""
Verify that the algorithm to be used is supported by the other side.
:param alg: The algorithm specification
:param usage: In which context the 'alg' will be used.
The following values are supported:
- userinfo
- id_token
- request_object
- token_endpoint_auth
:param other: The identifier for the other side
:return: True or False
"""
try:
_pcr = self.provider_info
supported = _pcr["%s_algs_supported" % usage]
except KeyError:
try:
supported = getattr(self, "%s_algs_supported" % usage)
except AttributeError:
supported = None
if supported is None:
return True
else:
if alg in supported:
return True
else:
return False
def match_preferences(self, pcr=None, issuer=None):
"""
Match the client's preferences against what the provider can do.
:param pcr: Provider configuration response if available
:param issuer: The issuer identifier
"""
if not pcr:
pcr = self.provider_info
regreq = RegistrationRequest
for _pref, _prov in PREFERENCE2PROVIDER.items():
try:
vals = self.client_prefs[_pref]
except KeyError:
continue
try:
_pvals = pcr[_prov]
except KeyError:
try:
self.behaviour[_pref] = PROVIDER_DEFAULT[_pref]
except KeyError:
# self.behaviour[_pref]= vals[0]
if isinstance(pcr.c_param[_prov][0], list):
self.behaviour[_pref] = []
else:
self.behaviour[_pref] = None
continue
if isinstance(vals, six.string_types):
if vals in _pvals:
self.behaviour[_pref] = vals
else:
vtyp = regreq.c_param[_pref]
if isinstance(vtyp[0], list):
self.behaviour[_pref] = []
for val in vals:
if val in _pvals:
self.behaviour[_pref].append(val)
else:
for val in vals:
if val in _pvals:
self.behaviour[_pref] = val
break
if _pref not in self.behaviour:
raise ConfigurationError(
"OP couldn't match preference:%s" % _pref, pcr)
for key, val in self.client_prefs.items():
if key in self.behaviour:
continue
try:
vtyp = regreq.c_param[key]
if isinstance(vtyp[0], list):
pass
elif isinstance(val, list) and not isinstance(val,
six.string_types):
val = val[0]
except KeyError:
pass
if key not in PREFERENCE2PROVIDER:
self.behaviour[key] = val
def store_registration_info(self, reginfo):
self.registration_response = reginfo
if "token_endpoint_auth_method" not in self.registration_response:
self.registration_response[
"token_endpoint_auth_method"] = "client_secret_basic"
self.client_id = reginfo["client_id"]
try:
self.client_secret = reginfo["client_secret"]
except KeyError: # Not required
pass
else:
try:
self.registration_expires = reginfo["client_secret_expires_at"]
except KeyError:
pass
try:
self.registration_access_token = reginfo[
"registration_access_token"]
except KeyError:
pass
def handle_registration_info(self, response):
err_msg = 'Got error response: {}'
unk_msg = 'Unknown response: {}'
if response.status_code in [200, 201]:
resp = RegistrationResponse().deserialize(response.text, "json")
# Some implementations sends back a 200 with an error message inside
try:
resp.verify()
except oauth2.message.MissingRequiredAttribute as err:
logger.error(err)
raise RegistrationError(err)
except Exception:
resp = ErrorResponse().deserialize(response.text, "json")
if resp.verify():
logger.error(err_msg.format(sanitize(resp.to_json())))
if self.events:
self.events.store('protocol response', resp)
raise RegistrationError(resp.to_dict())
else: # Something else
logger.error(unk_msg.format(sanitize(response.text)))
raise RegistrationError(response.text)
else:
# got a proper registration response
self.store_response(resp, response.text)
self.store_registration_info(resp)
elif 400 <= response.status_code <= 499:
try:
resp = ErrorResponse().deserialize(response.text, "json")
except _decode_err:
logger.error(unk_msg.format(sanitize(response.text)))
raise RegistrationError(response.text)
if resp.verify():
logger.error(err_msg.format(sanitize(resp.to_json())))
if self.events:
self.events.store('protocol response', resp)
raise RegistrationError(resp.to_dict())
else: # Something else
logger.error(unk_msg.format(sanitize(response.text)))
raise RegistrationError(response.text)
else:
raise RegistrationError(response.text)
return resp
def registration_read(self, url="", registration_access_token=None):
"""
Read the client registration info from the given url
:raises RegistrationError: If an error happened
:return: RegistrationResponse
"""
if not url:
url = self.registration_response["registration_client_uri"]
if not registration_access_token:
registration_access_token = self.registration_access_token
headers = {"Authorization": "Bearer %s" % registration_access_token}
rsp = self.http_request(url, "GET", headers=headers)
return self.handle_registration_info(rsp)
def generate_request_uris(self, request_dir):
"""
Need to generate a path that is unique for the RP/OP combination
:return: A list of uris
"""
m = hashlib.sha256()
m.update(as_bytes(self.provider_info['issuer']))
m.update(as_bytes(self.base_url))
return '{}{}/{}'.format(self.base_url, request_dir, m.hexdigest())
def create_registration_request(self, **kwargs):
"""
Create a registration request
:param kwargs: parameters to the registration request
:return:
"""
req = RegistrationRequest()
for prop in req.parameters():
try:
req[prop] = kwargs[prop]
except KeyError:
try:
req[prop] = self.behaviour[prop]
except KeyError:
pass
if "post_logout_redirect_uris" not in req:
try:
req[
"post_logout_redirect_uris"] = \
self.post_logout_redirect_uris
except AttributeError:
pass
if "redirect_uris" not in req:
try:
req["redirect_uris"] = self.redirect_uris
except AttributeError:
raise MissingRequiredAttribute("redirect_uris", req)
try:
if self.provider_info['require_request_uri_registration'] is True:
req['request_uris'] = self.generate_request_uris(
self.requests_dir)
except KeyError:
pass
return req
def register(self, url, **kwargs):
"""
Register the client at an OP
:param url: The OPs registration endpoint
:param kwargs: parameters to the registration request
:return:
"""
req = self.create_registration_request(**kwargs)
logger.debug("[registration_request]: kwargs:%s" % (sanitize(kwargs),))
if self.events:
self.events.store('Protocol request', req)
headers = {"content-type": "application/json"}
rsp = self.http_request(url, "POST", data=req.to_json(),
headers=headers)
return self.handle_registration_info(rsp)
def normalization(self, principal, idtype="mail"):
if idtype == "mail":
(local, domain) = principal.split("@")
subject = "acct:%s" % principal
elif idtype == "url":
p = urlparse(principal)
domain = p.netloc
subject = principal
else:
domain = ""
subject = principal
return subject, domain
def discover(self, principal):
# subject, host = self.normalization(principal)
return self.wf.discovery_query(principal)
def sign_enc_algs(self, typ):
resp = {}
for key, val in PARAMMAP.items():
try:
resp[key] = self.registration_response[val % typ]
except (TypeError, KeyError):
if key == "sign":
resp[key] = DEF_SIGN_ALG["id_token"]
return resp
def _verify_id_token(self, id_token, nonce="", acr_values=None, auth_time=0,
max_age=0):
"""
If the JWT alg Header Parameter uses a MAC based algorithm such as
HS256, HS384, or HS512, the octets of the UTF-8 representation of the
client_secret corresponding to the client_id contained in the aud
(audience) Claim are used as the key to validate the signature. For MAC
based algorithms, the behavior is unspecified if the aud is
multi-valued or if an azp value is present that is different than the
aud value.
:param id_token: The ID Token to check
:param nonce: The nonce specified in the authorization request
:param acr_values: Asked for acr values
:param auth_time: An auth_time claim
:param max_age: Max age of authentication
"""
if self.provider_info["issuer"] != id_token["iss"]:
raise OtherError("issuer != iss")
if self.client_id not in id_token["aud"]:
raise OtherError("not intended for me")
if len(id_token["aud"]) > 1:
if "azp" not in id_token or id_token["azp"] != self.client_id:
raise OtherError("not intended for me")
_now = time_util.utc_time_sans_frac()
if _now > id_token["exp"]:
raise OtherError("Passed best before date")
if self.id_token_max_age and _now > int(id_token["iat"]) + self.id_token_max_age:
raise OtherError("I think this ID token is to old")
if nonce and nonce != id_token['nonce']:
raise OtherError("nonce mismatch")
if acr_values and id_token['acr'] not in acr_values:
raise OtherError("acr mismatch")
if max_age and _now > int(id_token['auth_time'] + max_age):
raise AuthnToOld("Too old authentication")
if auth_time:
if not claims_match(id_token["auth_time"], {"auth_time": auth_time}):
raise AuthnToOld("Too old authentication")
def verify_id_token(self, id_token, authn_req):
kwa = {}
try:
kwa["nonce"] = authn_req["nonce"]
except KeyError:
pass
for param in ["acr_values", "max_age"]:
try:
kwa[param] = authn_req[param]
except KeyError:
pass
self._verify_id_token(id_token, **kwa)
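# Illustrative use of the ID Token checks above (names are hypothetical):
#
#   client.verify_id_token(id_token, authn_req)
#   # raises OtherError on issuer/audience/nonce/acr mismatch or an expired
#   # token, and AuthnToOld when the authentication is older than max_age.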
class Server(oauth2.Server):
def __init__(self, keyjar=None, verify_ssl=True,
client_cert=None):
oauth2.Server.__init__(self, keyjar, verify_ssl,
client_cert=client_cert)
@staticmethod
def _parse_urlencoded(url=None, query=None):
if url:
parts = urlparse(url)
scheme, netloc, path, params, query, fragment = parts[:6]
return parse_qs(query)
def parse_token_request(self, request=AccessTokenRequest,
body=None):
return oauth2.Server.parse_token_request(self, request, body)
def handle_request_uri(self, request_uri, verify=True, sender=''):
"""
:param request_uri: URL pointing to where the signed request should
be fetched from.
:param verify: Whether the signature on the request should be verified.
Don't use anything but the default unless you REALLY know what you're
doing
:param sender: The issuer of the request JWT.
:return:
"""
# Do a HTTP get
logger.debug('Get request from request_uri: {}'.format(request_uri))
try:
http_req = self.http_request(request_uri)
except ConnectionError:
logger.error('Connection Error')
return authz_error("invalid_request_uri")
if not http_req:
logger.error('Nothing returned')
return authz_error("invalid_request_uri")
elif http_req.status_code >= 400:
logger.error('HTTP error {}:{}'.format(http_req.status_code,
http_req.text))
raise AuthzError('invalid_request')
# http_req.text is a signed JWT
try:
logger.debug('request txt: {}'.format(http_req.text))
req = self.parse_jwt_request(txt=http_req.text, verify=verify,
sender=sender)
except Exception as err:
logger.error(
'{}:{} encountered while parsing fetched request'.format(
err.__class__, err))
raise AuthzError("invalid_openid_request_object")
logger.debug('Fetched request: {}'.format(req))
return req
def parse_authorization_request(self, request=AuthorizationRequest,
url=None, query=None, keys=None):
if url:
parts = urlparse(url)
scheme, netloc, path, params, query, fragment = parts[:6]
if isinstance(query, dict):
sformat = "dict"
else:
sformat = 'urlencoded'
_req = self._parse_request(request, query, sformat, verify=False)
if self.events:
self.events.store('Request', _req)
_req_req = {}
try:
_request = _req['request']
except KeyError:
try:
_url = _req['request_uri']
except KeyError:
pass
else:
_req_req = self.handle_request_uri(_url, verify=False,
sender=_req['client_id'])
else:
if isinstance(_request, Message):
_req_req = _request
else:
try:
_req_req = self.parse_jwt_request(request, txt=_request,
verify=False)
except Exception as err:
_req_req = self._parse_request(request, _request,
'urlencoded', verify=False)
else: # remove JWT attributes
for attr in JasonWebToken.c_param:
try:
del _req_req[attr]
except KeyError:
pass
if isinstance(_req_req, Response):
return _req_req
if _req_req:
if self.events:
self.events.store('Signed Request', _req_req)
for key, val in _req.items():
if key in ['request', 'request_uri']:
continue
if key not in _req_req:
_req_req[key] = val
_req = _req_req
if self.events:
self.events.store('Combined Request', _req)
try:
_req.verify(keyjar=self.keyjar)
except Exception as err:
if self.events:
self.events.store('Exception', err)
logger.error(err)
raise
return _req
def parse_jwt_request(self, request=AuthorizationRequest, txt="",
keys=None, verify=True, sender=''):
return oauth2.Server.parse_jwt_request(self, request, txt, keys, verify,
sender=sender)
def parse_refresh_token_request(self,
request=RefreshAccessTokenRequest,
body=None):
return oauth2.Server.parse_refresh_token_request(self, request, body)
def parse_check_session_request(self, url=None, query=None):
"""
"""
param = self._parse_urlencoded(url, query)
assert "id_token" in param # ignore the rest
return deser_id_token(self, param["id_token"][0])
def parse_check_id_request(self, url=None, query=None):
"""
"""
param = self._parse_urlencoded(url, query)
assert "access_token" in param # ignore the rest
return deser_id_token(self, param["access_token"][0])
def _parse_request(self, request_cls, data, sformat, client_id=None,
verify=True):
if sformat == "json":
request = request_cls().from_json(data)
elif sformat == "jwt":
request = request_cls().from_jwt(data, keyjar=self.keyjar,
sender=client_id)
elif sformat == "urlencoded":
if '?' in data:
parts = urlparse(data)
scheme, netloc, path, params, query, fragment = parts[:6]
else:
query = data
request = request_cls().from_urlencoded(query)
elif sformat == 'dict':
request = request_cls(**data)
else:
raise ParseError("Unknown package format: '{}'".format(sformat),
request_cls)
# get the verification keys
if client_id:
keys = self.keyjar.verify_keys(client_id)
sender = client_id
else:
try:
keys = self.keyjar.verify_keys(request["client_id"])
sender = request['client_id']
except KeyError:
keys = None
sender = ''
logger.debug("Found {} verify keys".format(len(keys or '')))
if verify:
request.verify(key=keys, keyjar=self.keyjar, sender=sender)
return request
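# Illustrative sketch (not part of the original module): the essence of the
# "urlencoded" branch in _parse_request() above -- extract the query string
# from a full URL if one is given, then parse it into a flat dict. The
# _example_* name is made up for illustration.
def _example_parse_urlencoded(data):
    from urllib.parse import parse_qs, urlparse

    query = urlparse(data).query if "?" in data else data
    # parse_qs returns lists of values; keep only the first of each.
    return {key: values[0] for key, values in parse_qs(query).items()}

# _example_parse_urlencoded("https://op.example.org/authz?response_type=code&client_id=abc")
# -> {'response_type': 'code', 'client_id': 'abc'}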
def parse_open_id_request(self, data, sformat="urlencoded", client_id=None):
return self._parse_request(OpenIDRequest, data, sformat, client_id)
def parse_user_info_request(self, data, sformat="urlencoded"):
return self._parse_request(UserInfoRequest, data, sformat)
def parse_userinfo_request(self, data, sformat="urlencoded"):
return self._parse_request(UserInfoRequest, data, sformat)
def parse_refresh_session_request(self, url=None, query=None):
if url:
parts = urlparse(url)
scheme, netloc, path, params, query, fragment = parts[:6]
return RefreshSessionRequest().from_urlencoded(query)
def parse_registration_request(self, data, sformat="urlencoded"):
return self._parse_request(RegistrationRequest, data, sformat)
def parse_end_session_request(self, query, sformat="urlencoded"):
esr = self._parse_request(EndSessionRequest, query,
sformat)
# if there is an id_token in there, it arrives as a string
esr["id_token"] = deser_id_token(self, esr["id_token"])
return esr
@staticmethod
def update_claims(session, where, about, old_claims=None):
"""
:param session:
:param where: Which request
:param about: userinfo or id_token
:param old_claims:
:return: claims or None
"""
if old_claims is None:
old_claims = {}
req = None
if where == "oidreq":
try:
req = OpenIDRequest().deserialize(session[where], "json")
except KeyError:
pass
else: # where == "authzreq"
try:
req = AuthorizationRequest().deserialize(session[where], "json")
except KeyError:
pass
if req:
logger.debug("%s: %s" % (where, sanitize(req.to_dict())))
try:
_claims = req["claims"][about]
if _claims:
# update with old claims, do not overwrite
for key, val in old_claims.items():
if key not in _claims:
_claims[key] = val
return _claims
except KeyError:
pass
return old_claims
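# Illustrative sketch (not part of the original module): the merge rule used
# by update_claims() above -- claims taken from the request win, and older
# claims only fill in keys that are missing. The _example_* name is made up
# for illustration.
def _example_merge_claims(requested, old):
    merged = dict(requested)
    for key, value in old.items():
        merged.setdefault(key, value)
    return merged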
def id_token_claims(self, session):
"""
Pick the IdToken claims from the request
:param session: Session information
:return: The IdToken claims
"""
itc = {}
itc = self.update_claims(session, "authzreq", "id_token", itc)
itc = self.update_claims(session, "oidreq", "id_token", itc)
return itc
def make_id_token(self, session, loa="2", issuer="",
alg="RS256", code=None, access_token=None,
user_info=None, auth_time=0, exp=None, extra_claims=None):
"""
:param session: Session information
:param loa: Level of Assurance/Authentication context
:param issuer: My identifier
:param alg: Which signing algorithm to use for the IdToken
:param code: Access grant
:param access_token: Access Token
:param user_info: If user info is to be part of the IdToken
:param auth_time: Authentication time, as seconds since the epoch
:param exp: Lifetime of the IdToken, as a dict of time units (default {"days": 1})
:param extra_claims: Extra claims to include in the IdToken
:return: IDToken instance
"""
# defaults
if exp is None:
inawhile = {"days": 1}
else:
inawhile = exp
# Handle the idtoken_claims
extra = {}
itc = self.id_token_claims(session)
if itc.keys():
try:
inawhile = {"seconds": itc["max_age"]}
except KeyError:
pass
for key, val in itc.items():
if key == "auth_time":
extra["auth_time"] = auth_time
elif key == "acr":
# ["2","http://id.incommon.org/assurance/bronze"]
extra["acr"] = verify_acr_level(val, loa)
else:
if auth_time:
extra["auth_time"] = auth_time
if loa:
extra["acr"] = loa
if not user_info:
_args = {}
else:
try:
_args = user_info.to_dict()
except AttributeError:
_args = user_info
# Make sure that there are no name clashes
for key in ["iss", "sub", "aud", "exp", "acr", "nonce",
"auth_time"]:
try:
del _args[key]
except KeyError:
pass
halg = "HS%s" % alg[-3:]
if extra_claims is not None:
_args.update(extra_claims)
if code:
_args["c_hash"] = jws.left_hash(code.encode("utf-8"), halg)
if access_token:
_args["at_hash"] = jws.left_hash(access_token.encode("utf-8"),
halg)
idt = IdToken(iss=issuer, sub=session["sub"],
aud=session["client_id"],
exp=time_util.epoch_in_a_while(**inawhile), acr=loa,
iat=time_util.utc_time_sans_frac(),
**_args)
for key, val in extra.items():
idt[key] = val
if "nonce" in session:
idt["nonce"] = session["nonce"]
return idt
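# Illustrative sketch (not part of the original module): how the c_hash /
# at_hash values used in make_id_token() above are derived. The value is the
# left half of the hash of the ASCII octets, where the hash function matches
# the ID Token signing algorithm (e.g. RS256 -> SHA-256), base64url-encoded
# without padding -- essentially what jws.left_hash() computes. The _example_*
# name is made up for illustration.
def _example_left_hash(value, hash_name="sha256"):
    import base64
    import hashlib

    digest = hashlib.new(hash_name, value.encode("ascii")).digest()
    left_half = digest[: len(digest) // 2]
    return base64.urlsafe_b64encode(left_half).rstrip(b"=").decode("ascii")

# _example_left_hash("SplxlOBeZQQYbYS6WxSbIA")  -> the at_hash for that token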
def scope2claims(scopes):
res = {}
for scope in scopes:
try:
claims = dict([(name, None) for name in SCOPE2CLAIMS[scope]])
res.update(claims)
except KeyError:
continue
return res
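# Illustrative sketch (not part of the original module): the pattern used by
# scope2claims() above, with a small hypothetical scope-to-claims table. The
# real SCOPE2CLAIMS mapping is defined elsewhere in the library; both the
# table and the _example_* names below are made up for illustration.
_EXAMPLE_SCOPE2CLAIMS = {
    "openid": ["sub"],
    "email": ["email", "email_verified"],
}

def _example_scope2claims(scopes, table=_EXAMPLE_SCOPE2CLAIMS):
    claims = {}
    for scope in scopes:
        claims.update({name: None for name in table.get(scope, [])})
    return claims

# _example_scope2claims(["openid", "email"])
# -> {'sub': None, 'email': None, 'email_verified': None}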
| 36.844444
| 89
| 0.540048
|
49c2d3c26e92401f8d5782ff1dcb394c5547403f
| 26,140
|
py
|
Python
|
src/sage/monoids/string_monoid.py
|
UCD4IDS/sage
|
43474c96d533fd396fe29fe0782d44dc7f5164f7
|
[
"BSL-1.0"
] | 1,742
|
2015-01-04T07:06:13.000Z
|
2022-03-30T11:32:52.000Z
|
src/sage/monoids/string_monoid.py
|
UCD4IDS/sage
|
43474c96d533fd396fe29fe0782d44dc7f5164f7
|
[
"BSL-1.0"
] | 66
|
2015-03-19T19:17:24.000Z
|
2022-03-16T11:59:30.000Z
|
src/sage/monoids/string_monoid.py
|
UCD4IDS/sage
|
43474c96d533fd396fe29fe0782d44dc7f5164f7
|
[
"BSL-1.0"
] | 495
|
2015-01-10T10:23:18.000Z
|
2022-03-24T22:06:11.000Z
|
r"""
Free String Monoids
AUTHORS:
- David Kohel <kohel@maths.usyd.edu.au>, 2007-01
Sage supports a wide range of specific free string monoids.
"""
#*****************************************************************************
# Copyright (C) 2007 David Kohel <kohel@maths.usyd.edu.au>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from .free_monoid import FreeMonoid
from .string_monoid_element import StringMonoidElement
from .string_ops import strip_encoding
class StringMonoid_class(FreeMonoid):
r"""
A free string monoid on `n` generators.
"""
def __init__(self, n, alphabet=()):
r"""
Create free binary string monoid on `n` generators.
INPUT:
- ``n`` -- Integer
- ``alphabet`` -- String or tuple whose characters or elements denote
the generators.
EXAMPLES::
sage: S = BinaryStrings(); S
Free binary string monoid
sage: x = S.gens()
sage: x[0]*x[1]**5 * (x[0]*x[1])
01111101
"""
# Names must be alphabetical -- omitted since printing is
# defined locally.
# FreeMonoid.__init__(self, n, names = alphabet)
FreeMonoid.__init__(self, n)
self._alphabet = alphabet
def __contains__(self, x):
return isinstance(x, StringMonoidElement) and x.parent() == self
def alphabet(self):
return tuple(self._alphabet)
def one(self):
r"""
Return the identity element of ``self``.
EXAMPLES::
sage: b = BinaryStrings(); b
Free binary string monoid
sage: b.one() * b('1011')
1011
sage: b.one() * b('110') == b('110')
True
sage: b('10101') * b.one() == b('101011')
False
"""
return StringMonoidElement(self, '')
def gen(self, i=0):
r"""
The `i`-th generator of the monoid.
INPUT:
- ``i`` -- integer (default: 0)
EXAMPLES::
sage: S = BinaryStrings()
sage: S.gen(0)
0
sage: S.gen(1)
1
sage: S.gen(2)
Traceback (most recent call last):
...
IndexError: Argument i (= 2) must be between 0 and 1.
sage: S = HexadecimalStrings()
sage: S.gen(0)
0
sage: S.gen(12)
c
sage: S.gen(16)
Traceback (most recent call last):
...
IndexError: Argument i (= 16) must be between 0 and 15.
"""
n = self.ngens()
if i < 0 or not i < n:
raise IndexError(
"Argument i (= %s) must be between 0 and %s." % (i, n-1))
return StringMonoidElement(self, [int(i)])
#*****************************************************************************
# Specific global string monoids
#*****************************************************************************
class BinaryStringMonoid(StringMonoid_class):
r"""
The free binary string monoid on generators `\{ 0, 1 \}`.
"""
def __init__(self):
r"""
Create free binary string monoid on generators `\{ 0, 1 \}`.
EXAMPLES::
sage: S = BinaryStrings(); S
Free binary string monoid
sage: x = S.gens()
sage: x[0]*x[1]**5 * (x[0]*x[1])
01111101
sage: u = S('')
sage: u
sage: x = S('0')
sage: x
0
sage: y = S('1')
sage: y
1
sage: z = S('01110')
sage: z
01110
sage: x*y^3*x == z
True
sage: u*x == x*u
True
TESTS::
sage: BinaryStrings() == BinaryStrings()
True
sage: BinaryStrings() is BinaryStrings()
True
"""
StringMonoid_class.__init__(self, 2, ['0', '1'])
def __repr__(self):
return "Free binary string monoid"
def __call__(self, x, check=True):
r"""
Return ``x`` coerced into this free monoid.
One can create a free binary string monoid element from a
Python string of 0's and 1's or a list of integers.
NOTE: Due to the ambiguity of the second generator '1' with
the identity element '' of the monoid, the syntax S(1) is not
permissible.
EXAMPLES::
sage: S = BinaryStrings()
sage: S('101')
101
sage: S.gen(0)
0
sage: S.gen(1)
1
"""
## There should really be some careful type checking here...
if isinstance(x, StringMonoidElement) and x.parent() == self:
return x
elif isinstance(x, list):
return StringMonoidElement(self, x, check)
elif isinstance(x, str):
return StringMonoidElement(self, x, check)
else:
raise TypeError("Argument x (= %s) is of the wrong type." % x)
def encoding(self, S, padic=False):
r"""
The binary encoding of the string ``S``, as a binary string element.
The default is to keep the standard ASCII byte encoding, e.g.
::
A = 65 -> 01000001
B = 66 -> 01000010
.
.
.
Z = 90 -> 01011010
rather than a 2-adic representation 65 -> 10000010.
Set ``padic=True`` to reverse the bit string.
EXAMPLES::
sage: S = BinaryStrings()
sage: S.encoding('A')
01000001
sage: S.encoding('A',padic=True)
10000010
sage: S.encoding(' ',padic=True)
00000100
"""
bit_string = []
for i in range(len(S)):
n = ord(S[i])
bits = []
for i in range(8):
bits.append(n%2)
n = n >> 1
if not padic:
bits.reverse()
bit_string.extend(bits)
return self(bit_string)
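# Illustrative sketch (not part of the original module): the bit encoding
# described above, in plain Python with no Sage dependency. Each character is
# turned into 8 bits, most significant bit first by default, or reversed when
# padic=True. The _example_* name is made up for illustration.
def _example_binary_encoding(text, padic=False):
    bit_string = ""
    for ch in text:
        bits = format(ord(ch), "08b")        # e.g. 'A' -> '01000001'
        bit_string += bits[::-1] if padic else bits
    return bit_string

# _example_binary_encoding('A')             -> '01000001'
# _example_binary_encoding('A', padic=True) -> '10000010'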
# def ngens(self):
# r"""
# Return the number of generators of this free binary string monoid.
# There are only 2 elements in the binary number system. Hence, this
# is the number of generators.
# EXAMPLES::
# sage: S = BinaryStrings()
# sage: S.ngens()
# 2
# """
# return 2
BinaryStrings = BinaryStringMonoid
class OctalStringMonoid(StringMonoid_class):
r"""
The free octal string monoid on generators `\{ 0, 1, \dots, 7 \}`.
"""
def __init__(self):
r"""
Create free octal string monoid on generators `\{ 0, 1, \dots, 7 \}`.
EXAMPLES::
sage: S = OctalStrings(); S
Free octal string monoid
sage: x = S.gens()
sage: (x[0]*x[7])**3 * (x[0]*x[1]*x[6]*x[5])**2
07070701650165
sage: S([ i for i in range(8) ])
01234567
sage: x[0]
0
sage: x[7]
7
sage: x[0] * x[3]^3 * x[5]^4 * x[6]
033355556
"""
StringMonoid_class.__init__(self, 8, [ str(i) for i in range(8) ])
def __repr__(self):
return "Free octal string monoid"
def __call__(self, x, check=True):
r"""
Return ``x`` coerced into this free monoid.
One can create a free octal string monoid element from a
Python string of digits 0 through 7 or a list of integers.
EXAMPLES::
sage: S = OctalStrings()
sage: S('07070701650165')
07070701650165
sage: S.gen(0)
0
sage: S.gen(1)
1
sage: S([ i for i in range(8) ])
01234567
"""
## There should really be some careful type checking here...
if isinstance(x, StringMonoidElement) and x.parent() == self:
return x
elif isinstance(x, list):
return StringMonoidElement(self, x, check)
elif isinstance(x, str):
return StringMonoidElement(self, x, check)
else:
raise TypeError("Argument x (= %s) is of the wrong type." % x)
OctalStrings = OctalStringMonoid
class HexadecimalStringMonoid(StringMonoid_class):
r"""
The free hexadecimal string monoid on generators
`\{ 0, 1, \dots, 9, a, b, c, d, e, f \}`.
"""
def __init__(self):
r"""
Create free hexadecimal string monoid on generators
`\{ 0, 1, \dots, 9, a, b, c, d, e, f \}`.
EXAMPLES::
sage: S = HexadecimalStrings(); S
Free hexadecimal string monoid
sage: x = S.gens()
sage: (x[0]*x[10])**3 * (x[0]*x[1]*x[9]*x[15])**2
0a0a0a019f019f
sage: S([ i for i in range(16) ])
0123456789abcdef
sage: x = S.gen(0)
sage: y = S.gen(10)
sage: z = S.gen(15)
sage: z
f
sage: x*y^3*z
0aaaf
"""
alph = '0123456789abcdef'
StringMonoid_class.__init__(self, 16, [ alph[i] for i in range(16) ])
def __repr__(self):
return "Free hexadecimal string monoid"
def __call__(self, x, check=True):
r"""
Return ``x`` coerced into this free monoid.
One can create a free hexadecimal string monoid element from a
Python string or a list of integers in `\{ 0, \dots, 15 \}`.
EXAMPLES::
sage: S = HexadecimalStrings()
sage: S('0a0a0a019f019f')
0a0a0a019f019f
sage: S.gen(0)
0
sage: S.gen(1)
1
sage: S([ i for i in range(16) ])
0123456789abcdef
"""
## There should really be some careful type checking here...
if isinstance(x, StringMonoidElement) and x.parent() == self:
return x
elif isinstance(x, list):
return StringMonoidElement(self, x, check)
elif isinstance(x, str):
return StringMonoidElement(self, x, check)
else:
raise TypeError("Argument x (= %s) is of the wrong type." % x)
def encoding(self, S, padic=False):
r"""
The encoding of the string ``S`` as a hexadecimal string element.
The default is to keep the standard right-to-left byte encoding, e.g.
::
A = '\x41' -> 41
B = '\x42' -> 42
.
.
.
Z = '\x5a' -> 5a
rather than a left-to-right representation A = 65 -> 14.
Although standard (e.g., in the Python constructor '\xhh'),
this can be confusing when the string reads left-to-right.
Set ``padic=True`` to reverse the character encoding.
EXAMPLES::
sage: S = HexadecimalStrings()
sage: S.encoding('A')
41
sage: S.encoding('A',padic=True)
14
sage: S.encoding(' ',padic=False)
20
sage: S.encoding(' ',padic=True)
02
"""
hex_string = []
for i in range(len(S)):
n = ord(S[i])
n0 = n % 16
n1 = n // 16
if not padic:
hex_chars = [n1, n0]
else:
hex_chars = [n0, n1]
hex_string.extend(hex_chars)
return self(hex_string)
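# Illustrative sketch (not part of the original module): the hexadecimal
# encoding described above, in plain Python. Each character becomes two hex
# digits, high nibble first by default, or swapped when padic=True. The
# _example_* name is made up for illustration.
def _example_hex_encoding(text, padic=False):
    digits = "0123456789abcdef"
    out = ""
    for ch in text:
        high, low = divmod(ord(ch), 16)
        pair = digits[high] + digits[low]
        out += pair[::-1] if padic else pair
    return out

# _example_hex_encoding('A')             -> '41'
# _example_hex_encoding('A', padic=True) -> '14'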
HexadecimalStrings = HexadecimalStringMonoid
class Radix64StringMonoid(StringMonoid_class):
r"""
The free radix 64 string monoid on 64 generators.
"""
def __init__(self):
r"""
Create free radix 64 string monoid on 64 generators.
EXAMPLES::
sage: S = Radix64Strings(); S
Free radix 64 string monoid
sage: x = S.gens()
sage: (x[50]*x[10])**3 * (x[60]*x[1]*x[19]*x[35])**2
yKyKyK8BTj8BTj
sage: S([ i for i in range(64) ])
ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/
sage: x[0]
A
sage: x[62]
+
sage: x[63]
/
"""
alph = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
StringMonoid_class.__init__(self, 64, [ alph[i] for i in range(64) ])
def __repr__(self):
return "Free radix 64 string monoid"
def __call__(self, x, check=True):
r"""
Return ``x`` coerced into this free monoid.
One can create a free radix 64 string monoid element from a
Python string or a list of integers in `0, \dots, 63`, as for
generic ``FreeMonoids``.
EXAMPLES::
sage: S = Radix64Strings()
sage: S.gen(0)
A
sage: S.gen(1)
B
sage: S.gen(62)
+
sage: S.gen(63)
/
sage: S([ i for i in range(64) ])
ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/
"""
## There should really be some careful type checking here...
if isinstance(x, StringMonoidElement) and x.parent() == self:
return x
elif isinstance(x, list):
return StringMonoidElement(self, x, check)
elif isinstance(x, str):
return StringMonoidElement(self, x, check)
else:
raise TypeError("Argument x (= %s) is of the wrong type." % x)
Radix64Strings = Radix64StringMonoid
class AlphabeticStringMonoid(StringMonoid_class):
"""
The free alphabetic string monoid on generators A-Z.
EXAMPLES::
sage: S = AlphabeticStrings(); S
Free alphabetic string monoid on A-Z
sage: S.gen(0)
A
sage: S.gen(25)
Z
sage: S([ i for i in range(26) ])
ABCDEFGHIJKLMNOPQRSTUVWXYZ
"""
def __init__(self):
r"""
Create free alphabetic string monoid on generators A-Z.
EXAMPLES::
sage: S = AlphabeticStrings(); S
Free alphabetic string monoid on A-Z
sage: S.gen(0)
A
sage: S.gen(25)
Z
sage: S([ i for i in range(26) ])
ABCDEFGHIJKLMNOPQRSTUVWXYZ
"""
from sage.rings.all import RealField
RR = RealField()
# The characteristic frequency probability distribution of
# Robert Edward Lewand.
self._characteristic_frequency_lewand = {
"A": RR(0.08167), "B": RR(0.01492),
"C": RR(0.02782), "D": RR(0.04253),
"E": RR(0.12702), "F": RR(0.02228),
"G": RR(0.02015), "H": RR(0.06094),
"I": RR(0.06966), "J": RR(0.00153),
"K": RR(0.00772), "L": RR(0.04025),
"M": RR(0.02406), "N": RR(0.06749),
"O": RR(0.07507), "P": RR(0.01929),
"Q": RR(0.00095), "R": RR(0.05987),
"S": RR(0.06327), "T": RR(0.09056),
"U": RR(0.02758), "V": RR(0.00978),
"W": RR(0.02360), "X": RR(0.00150),
"Y": RR(0.01974), "Z": RR(0.00074)}
# The characteristic frequency probability distribution of
# H. Beker and F. Piper.
self._characteristic_frequency_beker_piper = {
"A": RR(0.082), "B": RR(0.015),
"C": RR(0.028), "D": RR(0.043),
"E": RR(0.127), "F": RR(0.022),
"G": RR(0.020), "H": RR(0.061),
"I": RR(0.070), "J": RR(0.002),
"K": RR(0.008), "L": RR(0.040),
"M": RR(0.024), "N": RR(0.067),
"O": RR(0.075), "P": RR(0.019),
"Q": RR(0.001), "R": RR(0.060),
"S": RR(0.063), "T": RR(0.091),
"U": RR(0.028), "V": RR(0.010),
"W": RR(0.023), "X": RR(0.001),
"Y": RR(0.020), "Z": RR(0.001)}
alph = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
StringMonoid_class.__init__(self, 26, [ alph[i] for i in range(26) ])
def __repr__(self):
return "Free alphabetic string monoid on A-Z"
def __call__(self, x, check=True):
r"""
Return ``x`` coerced into this free monoid.
One can create a free alphabetic string monoid element from a
Python string, or a list of integers in `0, \dots, 25`.
EXAMPLES::
sage: S = AlphabeticStrings()
sage: S.gen(0)
A
sage: S.gen(1)
B
sage: S.gen(25)
Z
sage: S([ i for i in range(26) ])
ABCDEFGHIJKLMNOPQRSTUVWXYZ
"""
## There should really be some careful type checking here...
if isinstance(x, StringMonoidElement) and x.parent() == self:
return x
elif isinstance(x, list):
return StringMonoidElement(self, x, check)
elif isinstance(x, str):
return StringMonoidElement(self, x, check)
else:
raise TypeError("Argument x (= %s) is of the wrong type." % x)
def characteristic_frequency(self, table_name="beker_piper"):
r"""
Return a table of the characteristic frequency probability
distribution of the English alphabet. In written English, various
letters of the English alphabet occur more frequently than others.
For example, the letter "E" appears more often than other
vowels such as "A", "I", "O", and "U". In long works of written
English such as books, the probability of a letter occurring tends
to stabilize around a value. We call this value the characteristic
frequency probability of the letter under consideration. When this
probability is considered for each letter of the English alphabet,
the resulting probabilities for all letters of this alphabet is
referred to as the characteristic frequency probability distribution.
Various studies report slightly different values for the
characteristic frequency probability of an English letter. For
instance, [Lew2000]_ reports that "E" has a characteristic
frequency probability of 0.12702, while [BP1982]_ reports this
value as 0.127. The concepts of characteristic frequency probability
and characteristic frequency probability distribution can also be
applied to non-empty alphabets other than the English alphabet.
The output of this method is different from that of the method
:func:`frequency_distribution()
<sage.monoids.string_monoid_element.StringMonoidElement.frequency_distribution>`.
One can think of the characteristic frequency probability of an
element in an alphabet `A` as the expected probability of that element
occurring. Let `S` be a string encoded using elements of `A`. The
frequency probability distribution corresponding to `S` provides us
with the frequency probability of each element of `A` as observed
occurring in `S`. Thus one distribution provides expected
probabilities, while the other provides observed probabilities.
INPUT:
- ``table_name`` -- (default ``"beker_piper"``) the table of
characteristic frequency probability distribution to use. The
following tables are supported:
- ``"beker_piper"`` -- the table of characteristic frequency
probability distribution by Beker and Piper [BP1982]_. This is
the default table to use.
- ``"lewand"`` -- the table of characteristic frequency
probability distribution by Lewand as described on page 36
of [Lew2000]_.
OUTPUT:
- A table of the characteristic frequency probability distribution
of the English alphabet. This is a dictionary of letter/probability
pairs.
EXAMPLES:
The characteristic frequency probability distribution table of
Beker and Piper [BP1982]_::
sage: A = AlphabeticStrings()
sage: table = A.characteristic_frequency(table_name="beker_piper")
sage: sorted(table.items())
<BLANKLINE>
[('A', 0.0820000000000000),
('B', 0.0150000000000000),
('C', 0.0280000000000000),
('D', 0.0430000000000000),
('E', 0.127000000000000),
('F', 0.0220000000000000),
('G', 0.0200000000000000),
('H', 0.0610000000000000),
('I', 0.0700000000000000),
('J', 0.00200000000000000),
('K', 0.00800000000000000),
('L', 0.0400000000000000),
('M', 0.0240000000000000),
('N', 0.0670000000000000),
('O', 0.0750000000000000),
('P', 0.0190000000000000),
('Q', 0.00100000000000000),
('R', 0.0600000000000000),
('S', 0.0630000000000000),
('T', 0.0910000000000000),
('U', 0.0280000000000000),
('V', 0.0100000000000000),
('W', 0.0230000000000000),
('X', 0.00100000000000000),
('Y', 0.0200000000000000),
('Z', 0.00100000000000000)]
The characteristic frequency probability distribution table
of Lewand [Lew2000]_::
sage: table = A.characteristic_frequency(table_name="lewand")
sage: sorted(table.items())
<BLANKLINE>
[('A', 0.0816700000000000),
('B', 0.0149200000000000),
('C', 0.0278200000000000),
('D', 0.0425300000000000),
('E', 0.127020000000000),
('F', 0.0222800000000000),
('G', 0.0201500000000000),
('H', 0.0609400000000000),
('I', 0.0696600000000000),
('J', 0.00153000000000000),
('K', 0.00772000000000000),
('L', 0.0402500000000000),
('M', 0.0240600000000000),
('N', 0.0674900000000000),
('O', 0.0750700000000000),
('P', 0.0192900000000000),
('Q', 0.000950000000000000),
('R', 0.0598700000000000),
('S', 0.0632700000000000),
('T', 0.0905600000000000),
('U', 0.0275800000000000),
('V', 0.00978000000000000),
('W', 0.0236000000000000),
('X', 0.00150000000000000),
('Y', 0.0197400000000000),
('Z', 0.000740000000000000)]
Illustrating the difference between :func:`characteristic_frequency`
and :func:`frequency_distribution() <sage.monoids.string_monoid_element.StringMonoidElement.frequency_distribution>`::
sage: A = AlphabeticStrings()
sage: M = A.encoding("abcd")
sage: FD = M.frequency_distribution().function()
sage: sorted(FD.items())
<BLANKLINE>
[(A, 0.250000000000000),
(B, 0.250000000000000),
(C, 0.250000000000000),
(D, 0.250000000000000)]
sage: CF = A.characteristic_frequency()
sage: sorted(CF.items())
<BLANKLINE>
[('A', 0.0820000000000000),
('B', 0.0150000000000000),
('C', 0.0280000000000000),
('D', 0.0430000000000000),
('E', 0.127000000000000),
('F', 0.0220000000000000),
('G', 0.0200000000000000),
('H', 0.0610000000000000),
('I', 0.0700000000000000),
('J', 0.00200000000000000),
('K', 0.00800000000000000),
('L', 0.0400000000000000),
('M', 0.0240000000000000),
('N', 0.0670000000000000),
('O', 0.0750000000000000),
('P', 0.0190000000000000),
('Q', 0.00100000000000000),
('R', 0.0600000000000000),
('S', 0.0630000000000000),
('T', 0.0910000000000000),
('U', 0.0280000000000000),
('V', 0.0100000000000000),
('W', 0.0230000000000000),
('X', 0.00100000000000000),
('Y', 0.0200000000000000),
('Z', 0.00100000000000000)]
TESTS:
The table name must be either "beker_piper" or "lewand"::
sage: table = A.characteristic_frequency(table_name="")
Traceback (most recent call last):
...
ValueError: Table name must be either 'beker_piper' or 'lewand'.
sage: table = A.characteristic_frequency(table_name="none")
Traceback (most recent call last):
...
ValueError: Table name must be either 'beker_piper' or 'lewand'.
"""
supported_tables = ["beker_piper", "lewand"]
if table_name not in supported_tables:
raise ValueError(
"Table name must be either 'beker_piper' or 'lewand'.")
from copy import copy
if table_name == "beker_piper":
return copy(self._characteristic_frequency_beker_piper)
if table_name == "lewand":
return copy(self._characteristic_frequency_lewand)
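# Illustrative sketch (not part of the original module): computing an
# *observed* frequency distribution from a concrete string in plain Python,
# to contrast with the *expected* (characteristic) probabilities returned by
# characteristic_frequency() above. The _example_* name is made up for
# illustration.
def _example_observed_frequencies(text):
    from collections import Counter

    letters = [ch for ch in text.upper() if ch.isalpha()]
    counts = Counter(letters)
    total = len(letters)
    return {letter: count / total for letter, count in counts.items()}

# _example_observed_frequencies("abcd")
# -> {'A': 0.25, 'B': 0.25, 'C': 0.25, 'D': 0.25}   (observed)
# whereas a characteristic table lists expected values such as 'A' ~ 0.082.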
def encoding(self, S):
r"""
The encoding of the string ``S`` in the alphabetic string monoid,
obtained by the monoid homomorphism
::
A -> A, ..., Z -> Z, a -> A, ..., z -> Z
and stripping away all other characters. It should be noted that
this is a non-injective monoid homomorphism.
EXAMPLES::
sage: S = AlphabeticStrings()
sage: s = S.encoding("The cat in the hat."); s
THECATINTHEHAT
sage: s.decoding()
'THECATINTHEHAT'
"""
return self(strip_encoding(S))
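# Illustrative sketch (not part of the original module): the strip-and-
# uppercase encoding described above, in plain Python -- keep ASCII letters
# only, map lower case to upper case, drop everything else. The _example_*
# name is made up for illustration.
def _example_alphabetic_encoding(text):
    import string

    return "".join(ch.upper() for ch in text if ch in string.ascii_letters)

# _example_alphabetic_encoding("The cat in the hat.") -> 'THECATINTHEHAT'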
AlphabeticStrings = AlphabeticStringMonoid
| 32.593516
| 126
| 0.524254
|