from time import sleep
from random import randint
print('\n\33[1;32m{:+^40}\33[m'.format(' JOKENPÔ '))
print('\nVamos jogar?')
itens = ('Pedra', 'Papel', 'Tesoura')
comp = randint(0, 2)
print('''Suas opções:
[ 0 ] Pedra
[ 1 ] Papel
[ 2 ] Tesoura''')
jog = int(input('Qual a sua jogada? '))
print('\n JO')
sleep(1)
print(' KEN')
sleep(1)
print(' PÔ\n')
print('-='*20)
print('Computador jogou: {}'.format(itens[comp]))
# Only index the tuple for valid plays; an out-of-range jog would raise IndexError here.
print('Jogador jogou: {}'.format(itens[jog] if 0 <= jog <= 2 else 'Inválida'))
print('-='*20)
if comp == 0: # Computer played Rock
if jog == 0:
print('\n\33[1;33mEMPATE!\33[m')
elif jog == 1:
print('\n\33[1;32mJOGADOR VENCEU\33[m!')
elif jog == 2:
print('\n\33[1;31mCOMPUTADOR VENCEU\33[m!')
else:
print('\n\33[32;43mJOGADA INVÁLIDA!!\33[m')
elif comp == 1: # Computer played Paper
if jog == 0:
print('\n\33[1;31mCOMPUTADOR VENCEU\33[m!')
elif jog == 1:
print('\n\33[1;33mEMPATE!\33[m')
elif jog == 2:
print('\n\33[1;32mJOGADOR VENCEU\33[m!')
else:
print('\n\33[32;43mJOGADA INVÁLIDA!!\33[m')
elif comp == 2: # Computer played Scissors
if jog == 0:
print('\n\33[1;32mJOGADOR VENCEU\33[m!')
elif jog == 1:
print('\n\33[1;31mCOMPUTADOR VENCEU\33[m!')
elif jog == 2:
print('\n\33[1;33mEMPATE!\33[m')
else:
print('\n\33[32;43mJOGADA INVÁLIDA!!\33[m')
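# Note: an equivalent, more compact decision rule (a sketch) with the item order
# (Pedra, Papel, Tesoura): (jog - comp) % 3 is 0 for a draw, 1 when the player
# wins, and 2 when the computer wins.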
|
""".oof"""
from telethon import events
import asyncio
@borg.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 1
animation_ttl = range(0, 13)  # one step per frame; the original range(0, 103) overran the 13-frame list
input_str = event.pattern_match.group(1)
if input_str == "oof":
await event.edit(input_str)
animation_chars = [
"O",
"Oo",
"Ooo",
"Ooooo",
"Oooooo",
"Ooooooo",
"Oooooooo",
"Ooooooooo",
"Oooooooooo",
"Ooooooooooo",
"Oooooooooooo",
"Ooooooooooooo",
"Oooooooooooooof"
]
for i in animation_ttl:
    await asyncio.sleep(animation_interval)  # pause between frames so each edit is visible
    await event.edit(animation_chars[i % 13])
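# Usage note: sending ".oof" as an outgoing message triggers the handler above;
# the userbot then edits that same message in place through each frame in animation_chars.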
|
import sendgrid
import os
from sendgrid.helpers.mail import Email, Mail  # explicit imports instead of a wildcard
# TODO: change from address and names
from_address = "no-reply@marchingbytes.com"
from_name = "MarchingBytes Automated"
def send_email(to, subject, body):
api_key = os.environ.get("SENDGRID_API_KEY")
if api_key is None:
print("Sendgrid not configured!")
print(f"to {to}, subject {subject}, body {body}")
return
sg = sendgrid.SendGridAPIClient(api_key)
from_sender = Email(email=from_address, name=from_name)
my_mail = Mail(from_email=from_sender, to_emails=to, subject=subject, html_content=body)
response = sg.send(my_mail)
print(response.status_code)
print(response.body)
print(response.headers)
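# Usage sketch (assumes SENDGRID_API_KEY is exported; the recipient is illustrative):
# send_email("someone@example.com", "Hello", "<p>Test message</p>")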
|
import numpy as np
from astropy import wcs
from matplotlib import pyplot
import h5py
import binFuncs
from scipy.interpolate import interp1d
import os
from matplotlib.patches import Ellipse
from Mapping import Mapper
from MappingAzEl import MapperAzEl
from MappingSun import MapperSun
from tqdm import tqdm
import click
import ast
class PythonLiteralOption(click.Option):
def type_cast_value(self, ctx, value):
    if isinstance(value, str):
        try:
            return ast.literal_eval(value)
        except (ValueError, SyntaxError):  # raised by literal_eval for malformed literals
            raise click.BadParameter(value)
else:
return value
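# PythonLiteralOption lets list-valued options be passed as Python literals on
# the command line, e.g. --feeds "[0,1,2]" is parsed to the list [0, 1, 2].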
@click.command()
@click.argument('filename')#, help='Level 1 hdf5 file')
@click.option('--image_directory', default=None, help='Output image header directory')
@click.option('--band_average', default=True,type=bool, help='Average channels into single map')
@click.option('--feed_average', default=False,type=bool, help='Average all feeds into single map')
@click.option('--feeds', default=[1], cls=PythonLiteralOption, help='List of feeds to use (index from 0)')
@click.option('--make_hits', default=True,type=bool, help='Make hit maps')
@click.option('--make_sky', default=True,type=bool, help='Make sky maps')
@click.option('--cdelt', default=[1.,1.],cls=PythonLiteralOption, help='WCS cdelt parameter of form [x_pix, y_pix] in arcmin')
@click.option('--field_width', default=None, cls=PythonLiteralOption, help='Field width list of form [ra_width, dec_width]')
@click.option('--ctype', default=['RA---TAN','DEC--TAN'], cls=PythonLiteralOption, help='Field WCS ctype list of form [RATYPE, DECTYPE]')
@click.option('--crval', default=None, cls=PythonLiteralOption, help='Field centre list of form [RA_cen, Dec_cen], (Default: None, take ra/dec from average of scans)')
@click.option('--source', default=None, help='Source name for field centre, if source unknown ignore (Default: None, take ra/dec centre from average ra/dec)')
@click.option('--plot_circle',default=False,type=bool, help='Overplot a circle of radius plot_circle_radius (Default: False)')
@click.option('--plot_circle_radius', default=1,type=float, help='Radius of over plotted circle')
@click.option('--az_el_mode',default=False,type=bool, help='Plot in az/el coordinates (Default: False)')
@click.option('--sun_mode',default=False,type=bool, help='Plot in Sun centric coordinates (Default: False)')
def call_level1_hitmaps(filename,
image_directory,
band_average,
feed_average,
feeds,
make_hits,
make_sky,
field_width,
cdelt,
ctype,
crval,
source,
plot_circle,
plot_circle_radius,
az_el_mode,
sun_mode):
level1_hitmaps(filename,
image_directory,
band_average,
feed_average,
feeds,
make_hits,
make_sky,
field_width,
cdelt,
ctype,
crval,
source,
plot_circle,
plot_circle_radius,
az_el_mode,
sun_mode)
def level1_hitmaps(filename,
image_directory,
band_average=True,
feed_average=False,
feeds=[1],
make_hits=True,
make_sky=True,
field_width=None,
cdelt=[1./60.,1./60.],
ctype=['RA---TAN','DEC--TAN'],
crval=None,
source='None',
plot_circle=False,
plot_circle_radius=1,
AzElMode=False,
SunMode=False):
"""Plot hit maps for feeds
Arguments:
filename: the name of the COMAP Level-1 file
Keywords:
feeds: Feeds (indexing starting from 0), can be list, tuple, range
makeHitMap: Make the hit map
makeAvgMap: Make the band average map and hit map
cdelt: pixel size in degrees
fieldWidth: image width in degrees
"""
try:
fd = h5py.File(filename,'r')
except OSError:
print('Unable to open file {}'.format(filename))
return
# cdelt given in arcmin
if field_width is not None:
    xpixelWidth = int(field_width[0]/cdelt[0]*60)
    ypixelWidth = int(field_width[1]/cdelt[1]*60)
    image_width = [xpixelWidth, ypixelWidth]
else:
    image_width = None
if image_directory is None:
image_directory = filename.split('/')[-1].split('.')[0]
if not os.path.exists(image_directory):
os.makedirs(image_directory)
if AzElMode:
mapper = MapperAzEl(makeHitMap=make_hits,
makeAvgMap=make_sky,
crval=crval,
cdelt=cdelt,
npix=image_width,
image_directory=image_directory,
ctype=ctype)
elif SunMode:
mapper = MapperSun(makeHitMap=make_hits,
makeAvgMap=make_sky,
crval=crval,
cdelt=cdelt,
npix=image_width,
image_directory=image_directory,
ctype=ctype)
else:
mapper = Mapper(makeHitMap=make_hits,
makeAvgMap=make_sky,
image_directory=image_directory,
crval=crval,
cdelt=cdelt,
npix=image_width,
ctype=ctype)
mapper.setLevel1(fd, source)
if 'all' in feeds:
feeds = [feed for feed in fd['spectrometer/feeds'][:] if feed != 20]
if feed_average:
maps = mapper(feeds, usetqdm=True)
fstr = '-'.join(['{:02d}'.format(feed) for feed in feeds if feed in mapper.feed_ids])
outdir = '{}'.format(image_directory)
mapper.plotImages(feeds,
'{}/Hitmap_FeedAvg.png'.format(outdir),
'{}/BandAverage_FeedAvg.png'.format(outdir),
plot_circle,
plot_circle_radius)
# mapper.SaveMaps('{}/BandAverage_FeedAvg.fits'.format(image_directory))
for feed in tqdm(feeds):
if mapper.map_bavg is not None:
mapper.map_bavg *= 0.
mapper.hits = None
maps = mapper(feed)
fstr = '-'.join(['{:02d}'.format(feed)])
outdir = '{}'.format(image_directory)
mapper.plotImages([feed],
'{}/Hitmap_Feed{:02d}.png'.format(outdir,feed),
'{}/BandAverage_Feed{:02d}.png'.format(outdir,feed),
plot_circle,
plot_circle_radius)
#mapper.SaveMaps('{}/BandAverage_Feed{:02d}.fits'.format(image_directory,feed))
if __name__ == "__main__":
call_level1_hitmaps()
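# Example invocation (a sketch; the script and data file names are placeholders):
#   python level1_hitmaps.py level1_obs.hd5 --feeds "[0,1]" --cdelt "[1.,1.]" --field_width "[4.,4.]"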
|
import matplotlib.pyplot as plot
import matplotlib.animation as animation
import numpy as np
import seaborn as sns
def calculateLoss(X, Y, weight, bias = 0):
predictedLine = X * weight + bias
return np.average((predictedLine - Y) ** 2)
def trainWithBias1(X, Y):
weight = 0
bias = 0
learningRate = 50
for i in range(20000):
currentLoss = calculateLoss(X, Y, weight, bias)
if i % 200 == 0:
plot.plot(X, Y, "bo")
image = plot.plot([0, 10], [0 + bias, 10 * weight + bias], linewidth = 1.0, color = "g")
images1.append(image)
print(i, weight, bias, currentLoss)
if currentLoss > calculateLoss(X, Y, weight + learningRate, bias):
weight += learningRate
elif currentLoss > calculateLoss(X, Y, weight - learningRate, bias):
weight -= learningRate
elif currentLoss > calculateLoss(X, Y, weight, bias + learningRate):
bias += learningRate
elif currentLoss > calculateLoss(X, Y, weight, bias - learningRate):
bias -= learningRate
else:
return (weight, bias)
return (weight, bias)
def trainWithBias2(X, Y):
weight = 0
bias = 0
learningRate = 50
for i in range(20000):
currentLoss = calculateLoss(X, Y, weight, bias)
loss1 = calculateLoss(X, Y, weight + learningRate, bias)
loss2 = calculateLoss(X, Y, weight - learningRate, bias)
loss3 = calculateLoss(X, Y, weight, bias + learningRate)
loss4 = calculateLoss(X, Y, weight, bias - learningRate)
if i % 200 == 0:
plot.plot(X, Y, "bo")
image = plot.plot(
[0, 10],
[0 + bias, 10 * weight + bias],
linewidth = 1.0,
color = "g")
images2.append(image)
print(i, weight, bias, currentLoss)
minLoss = min(currentLoss, loss1, loss2, loss3, loss4)
if minLoss == loss1:
weight += learningRate
elif minLoss == loss2:
weight -= learningRate
elif minLoss == loss3:
bias += learningRate
elif minLoss == loss4:
bias -= learningRate
else:
return (weight, bias)
return (weight, bias)
def prepareImage():
sns.set()
plot.xticks(fontsize = 10)
plot.yticks(fontsize = 10)
plot.xlabel("RM", fontsize = 10)
plot.ylabel("MEDV", fontsize = 10)
images1 = []
images2 = []
data = np.genfromtxt('./dataset/housing.csv', delimiter = ',', skip_header = 1)
X = data[:, 0]
Y = data[:, 3]
print("---------1----------")
prepareImage()
figure1 = plot.figure()
[weight1, bias1] = trainWithBias1(X, Y)
ani1 = animation.ArtistAnimation(figure1, images1, interval = 150)
ani1.save("/src/images/regression-with-bias1.gif", writer = "imagemagick")
plot.plot(X, Y,"bo")
plot.plot([0, 10], [0 + bias1, 10 * weight1 + bias1], linewidth = 1.0, color = "g")
plot.savefig("/src/images/linear-regression-with-bias1.png")
print("---------2----------")
prepareImage()
figure2 = plot.figure()
[weight2, bias2] = trainWithBias2(X, Y)
ani2 = animation.ArtistAnimation(figure2, images2, interval = 150)
ani2.save("/src/images/regression-with-bias2.gif", writer = "imagemagick")
plot.plot(X, Y,"bo")
plot.plot([0, 10], [0 + bias2, 10 * weight2 + bias2], linewidth = 1.0, color = "g")
plot.savefig("/src/images/linear-regression-with-bias2.png")
|
#!/usr/bin/env python2.7
import os
import argparse
import git
import sys
from datetime import datetime
import re
def GetVersion(backupFile, label=''):
    """As GetLongVersion(), but only returns the leading *.*.* value."""
raw = GetLongVersion(backupFile, label)
# Just return the first 3 parts of the version
short_ver = re.findall("^\\d+\\.\\d+\\.\\d+", raw)
return short_ver[0]
def GetLongVersion(backupFile, label=''):
"""Create a detailed version string based on the state of
the software, as it exists in the repository."""
if open_gee_version.long_version_string:
return open_gee_version.long_version_string
if _CheckGitAvailable():
ret = _GitGeneratedLongVersion()
# Without git, must use the backup file to create a string.
else:
base = _ReadBackupVersionFile(backupFile)
ret = '-'.join([base, _GetDateString()])
# Append the label, if there is one.
if len(label):
ret = '.'.join([ret, label])
# Cache the long version string:
open_gee_version.long_version_string = ret
return ret
def _GitGeneratedLongVersion():
"""Calculate the version name and build number into a single build string."""
versionName, buildNumber = _GitVersionNameAndBuildNumber()
return "{0}-{1}".format(versionName, buildNumber)
def _GitCommitCount(tagName='HEAD', baseRef=''):
"""calculate git commit counts"""
repo = _GetRepository()
if not baseRef:
return len(list(repo.iter_commits(tagName)))
else:
return len(list(repo.iter_commits(baseRef + '..' + tagName)))
def _GitVersionNameAndBuildNumber():
"""Get the version name and build number based on state of git
Use a tag only if HEAD is directly pointing to it and it is a
release build tag (see _GetCommitRawDescription for details)
otherwise use the branch name"""
# if head is pointing to a release tag use that
# this is needed so that if a release branch is
# checked out and the tail tag was removed if the
# head of that release branch is pointing to a
# release tag we still do the expected thing
releaseTag = _GetCurrentCommitReleaseTag()
if releaseTag:
# Extract version name and build number
# from the tag (should be a release build tag)
splitTag = releaseTag.split('-')
return splitTag[0], splitTag[1]
else:
# Use branch name if we are not a detached HEAD
branchName = _GitBranchName()
if not branchName:
# we are a detached head not on a release tag, so just treat
# the first part of the raw describe as the release name;
# 'b' is prepended to the build number to signal this is not a releasable build
return _GetCommitRawDescription().split('-')[0], "b{0}-{1}".format(_GitCommitCount(), open_gee_version.get_commit_hash_from_tag('HEAD'))
else:
# Get the version name from the branch name
if _IsReleaseBranch(branchName):
tailTag = _GetReleaseTailTag(branchName)
return _GetReleaseVersionName(branchName), '{0}.{1}'.format(_GitBranchedCommitCount(tailTag), _GitCommitCount('HEAD', tailTag))
else:
# 'b' is prepended to the build number to signal this is not a releasable build
return _GetCommitRawDescription().split('-')[0], "b{0}-{1}".format(_GitCommitCount(), _sanitizeBranchName(branchName))
def _GitBranchedCommitCount(tailTag):
"""Returns what the build number was from the branch point"""
prevRelTag = _GitPreviousReleaseTag(tailTag)
prevCommitCount = ''
if prevRelTag:
prevCommitCount = prevRelTag.split('-')[1]
else:
prevCommitCount = str(_GitCommitCount(tailTag))
return prevCommitCount
def _GitPreviousReleaseTag(tailTagName):
"""Looks for the tail tag and if it finds it then it looks for any release build tags
that are also pointing to the same commit"""
tailCommitHash = open_gee_version.get_commit_hash_from_tag(tailTagName)
tags = open_gee_version.get_tags_from_commit_hash(tailCommitHash)
for tag in tags:
if tag != tailTagName:
if _IsReleaseBuildTag(tag):
return tag
else:
pass
else:
pass
return ''
def _GitTagRealCommitIdWindows(tagName):
"""use shell command to retrieve commit id of where the tag points to (Windows version)"""
# for some reason .hexsha was not returning the same id....
commitId = os.popen("git rev-list -n 1 \"{0}\"".format(tagName.replace("\"", "\\\""))).read().strip()
return commitId
def _GitTagRealCommitIdLinux(tagName):
"""use shell command to retrieve commit id of where the tag points to (Linux version)"""
# for some reason .hexsha was not returning the same id....
commitId = os.popen("git rev-list -n 1 '{0}'".format(tagName.replace("'", "'\"'\"'"))).read().strip()
return commitId
def _GitTagRealCommitId(tagName):
"""use shell command to retrieve commit id of where the tag points to"""
if os.name == 'nt':  # '==' rather than 'is': identity comparison with a string literal is unreliable
return _GitTagRealCommitIdWindows(tagName)
else:
return _GitTagRealCommitIdLinux(tagName)
def _git_tag_list():
"""use shell command to retrieve a list of tags"""
# GitPython is broken on some platforms, so just use this more reliable method
return os.popen("git tag -l").read().split('\n')
def _IsReleaseBuildTag(tagName):
    """Checks whether the tag looks like a release build tag: when split on
    dashes it must have at least two elements, and the first two elements
    must each be a dot-delimited series of numbers and nothing else"""
    splitTag = tagName.split('-')
    if len(splitTag) > 1:
        return (re.match(r'^[0-9]+((\.[0-9]+)+)$', splitTag[0]) and re.match(r'^([0-9]+((\.[0-9]+)+)|[0-9]+)$', splitTag[1]))
    return False
def _IsReleaseBranch(branchName):
"""Check if the branch name is a release branch"""
# a release branch begins with 'release_' and has
# a base tag that matches the release name
if branchName.startswith('release_'):
tailTag = _GetReleaseTailTag(branchName)
if _gitHasTag(tailTag):
return True
else:
# see if we can pull the tag down from any of the remotes
repo = _GetRepository()
for remote in repo.remotes:
try:
remote.fetch('+refs/tags/{0}:refs/tags/{0}'.format(tailTag), None, **{'no-tags':True})
except Exception:
    pass  # fetch failures are non-fatal; fall through to the final tag check
# try one more time after the fetch attempt(s)
return (_gitHasTag(tailTag) != '')
else:
return False
def _gitHasTag(tagName):
"""See if a tag exists in git"""
return open_gee_version.get_commit_hash_from_tag(tagName)
def _sanitizeBranchName(branchName):
"""sanitize branch names to ensure some characters are not used"""
return re.sub('[$?*`\\-"\'\\\\/\\s]', '_', branchName)
def _GetReleaseVersionName(branchName):
    """Removes the prepended 'release_' from the branch name"""
    return branchName[8:]
def _GetReleaseTailTag(branchName):
    """Returns the tail tag of a release branch: the release version name with '-RC1' appended"""
    return _GetReleaseVersionName(branchName) + '-RC1'
def _GitBranchName():
"""Returns current branch name or empty string"""
try:
return _GetRepository().active_branch.name
except TypeError:
return ''
def _IsGitDescribeFirstParentSupported():
"""Checks whether --first-parent parameter is valid for the
version of git available"""
try:
repo = _GetRepository()
repo.git.describe('--first-parent')
return True
except git.exc.GitCommandError:
pass
return False
def _GetCommitRawDescription():
"""Returns description of current commit"""
args = ['--tags', '--match', r'[0-9]*\.[0-9]*\.[0-9]*\-*']
if _IsGitDescribeFirstParentSupported():
args.insert(0, '--first-parent')
repo = _GetRepository()
raw = repo.git.describe(*args)
raw = raw.rstrip()
return raw
def _GetCurrentCommitReleaseTag():
"""If head is pointing to a release tag return the name of the release tag"""
headCommitHash = open_gee_version.get_commit_hash_from_tag('HEAD')
tags = open_gee_version.get_tags_from_commit_hash(headCommitHash)
for tag in tags:
if _IsReleaseBuildTag(tag):
return tag
else:
pass
return ''
def _CheckGitAvailable():
"""Try the most basic of git commands, to see if there is
currently any access to a repository."""
try:
repo = _GetRepository()
except git.exc.InvalidGitRepositoryError:
return False
return True
def _GetRepository():
"""Get a reference to the Git Repository.
Is there a cleaner option than searching from the current location?"""
# The syntax is different between library versions (particularly,
# those used by Centos 6 vs Centos 7).
try:
return git.Repo('.', search_parent_directories=True)
except TypeError:
return git.Repo('.')
def _ReadBackupVersionFile(target):
"""There should be a file checked in with the latest version
information available; if git isn't available to provide
information, then use this file instead."""
with open(target, 'r') as fp:
line = fp.readline()
return line
def _GetDateString():
"""Returns formatted date string representing current UTC time"""
return datetime.utcnow().strftime("%Y%m%d%H%M")
class OpenGeeVersion(object):
"""A class for storing Open GEE version information."""
def __init__(self):
# Cache version strings:
self.short_version_string = None
self.long_version_string = None
# Default parameter for GetVersion functions
self_path, _ = os.path.split(os.path.realpath(__file__))
self.backup_file = os.path.join(self_path, '..', 'version.txt')
self.label = ''
self.tag_to_git_hash = None
self.git_hash_to_tags = None
def _init_tag_maps(self):
if self.tag_to_git_hash is None or self.git_hash_to_tags is None:
self.tag_to_git_hash = {}
self.git_hash_to_tags = {}
tags = _git_tag_list()
tags.append('HEAD') # make sure HEAD is in the list even though it really isn't a tag
for tag in tags:
    tag = tag.strip()  # str.strip() returns a new string, so the result must be reassigned
if tag:
git_hash = _GitTagRealCommitId(tag)
self.tag_to_git_hash[tag] = git_hash
if git_hash in self.git_hash_to_tags:
self.git_hash_to_tags[git_hash].append(tag)
else:
self.git_hash_to_tags[git_hash] = [tag]
else:
pass # ignore empty strings
def get_commit_hash_from_tag(self, tagName):
self._init_tag_maps()
return self.tag_to_git_hash.get(tagName, '')
def get_tags_from_commit_hash(self, commitHash):
self._init_tag_maps()
return self.git_hash_to_tags.get(commitHash, [])
def get_short(self):
"""Returns the short version string."""
if not self.short_version_string:
# Just return the first 3 parts of the version string
short_ver = re.findall("^\\d+\\.\\d+\\.\\d+", self.get_long())
self.short_version_string = short_ver[0]
return self.short_version_string
def set_short(self, value):
"""Overrides the short version string by using the given value."""
self.short_version_string = value
def get_long(self):
"""Returns the short version string."""
if not self.long_version_string:
self.long_version_string = GetLongVersion(self.backup_file, self.label)
return self.long_version_string
def set_long(self, value):
"""Overrides the long version string by using the given value.
Overriding the long version string would indirectly override the short
version string, as well, unless the former is also overridden.
"""
self.long_version_string = value
def get_warning_message(self):
"""Returns None, or a string describing known issues."""
return None if not _CheckGitAvailable() or _IsGitDescribeFirstParentSupported() else '''\
WARNING: Git version 1.8.4 or later is required to correctly determine the Open GEE version being built.
The Open GEE version is calculated from tags using the "git describe" command.
The "--first-parent" parameter introduced in Git 1.8.4 allows proper version calcuation on all branches.
Without the --first-parent parameter, the version calculated may be incorrect, depending on which branch is being built.
For information on upgrading Git, see:
https://github.com/google/earthenterprise/wiki/Frequently-Asked-Questions-(FAQs)#how-do-i-upgrade-git-to-the-recommended-version-for-building-google-earth-enterprise\
'''
# Exported variable for use by other modules:
open_gee_version = OpenGeeVersion()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--long", action="store_true", help="Output long format of version string")
args = parser.parse_args()
sys.stdout.write(open_gee_version.get_long() if args.long else open_gee_version.get_short())
sys.stdout.write('\n')
sys.stdout.flush()
warning_message = open_gee_version.get_warning_message()
if warning_message is not None:
sys.stderr.write(warning_message)
sys.stderr.write('\n')
sys.stderr.flush()
__all__ = ['open_gee_version']
if __name__ == "__main__":
main()
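# Usage sketch (run from inside a repository checkout; this_script.py is a
# placeholder name and the outputs are illustrative):
#   $ python this_script.py          # short version, e.g. 5.3.0
#   $ python this_script.py --long   # long version, e.g. 5.3.0-1234.56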
|
import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity
import scipy.sparse as sp
from sklearn.metrics import mean_squared_error
from math import sqrt
import itertools
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import random
def novelty(predicted, pop, u, n):
"""
Computes the novelty for a list of recommendations
Parameters
----------
predicted : a list of lists
Ordered predictions
example: [['X', 'Y', 'Z'], ['X', 'Y', 'Z']]
pop: dictionary
A dictionary of all items alongside of its occurrences counter in the training data
example: {1198: 893, 1270: 876, 593: 876, 2762: 867}
u: integer
The number of users in the training data
n: integer
The length of recommended lists per user
Returns
----------
novelty:
The novelty of the recommendations at the system level
mean_self_information:
The novelty of the recommendations at the recommended top-N list level
----------
Metric Definition:
Zhou, T., Kuscsik, Z., Liu, J. G., Medo, M., Wakeling, J. R., & Zhang, Y. C. (2010).
Solving the apparent diversity-accuracy dilemma of recommender systems.
Proceedings of the National Academy of Sciences, 107(10), 4511-4515.
"""
mean_self_information = []
k = 0
for sublist in predicted:
self_information = 0
k += 1
for i in sublist:
self_information += np.sum(-np.log2(pop[i]/u))
mean_self_information.append(self_information/n)
novelty = sum(mean_self_information)/k
return novelty, mean_self_information
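# Usage sketch (toy values, not from the original module): two users with the
# same top-3 list, popularity counts pop from a hypothetical set of u=20 users.
# pop = {'X': 10, 'Y': 5, 'Z': 1}
# novelty([['X', 'Y', 'Z'], ['X', 'Y', 'Z']], pop, u=20, n=3)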
def prediction_coverage(predicted, catalog):
"""
Computes the prediction coverage for a list of recommendations
Parameters
----------
predicted : a list of lists
Ordered predictions
example: [['X', 'Y', 'Z'], ['X', 'Y', 'Z']]
catalog: list
A list of all unique items in the training data
example: ['A', 'B', 'C', 'X', 'Y', 'Z']
Returns
----------
prediction_coverage:
The prediction coverage of the recommendations as a percent
rounded to 2 decimal places
----------
Metric Definition:
Ge, M., Delgado-Battenfeld, C., & Jannach, D. (2010, September).
Beyond accuracy: evaluating recommender systems by coverage and serendipity.
In Proceedings of the fourth ACM conference on Recommender systems (pp. 257-260). ACM.
"""
predicted_flattened = [p for sublist in predicted for p in sublist]
unique_predictions = len(set(predicted_flattened))
prediction_coverage = round(unique_predictions/(len(catalog)* 1.0)*100,2)
return prediction_coverage
def catalog_coverage(predicted, catalog, k):
"""
Computes the catalog coverage for k lists of recommendations
Parameters
----------
predicted : a list of lists
Ordered predictions
example: [['X', 'Y', 'Z'], ['X', 'Y', 'Z']]
catalog: list
A list of all unique items in the training data
example: ['A', 'B', 'C', 'X', 'Y', 'Z']
k: integer
The number of observed recommendation lists,
which are sampled at random in our offline setup
Returns
----------
catalog_coverage:
The catalog coverage of the recommendations as a percent
rounded to 2 decimal places
----------
Metric Definition:
Ge, M., Delgado-Battenfeld, C., & Jannach, D. (2010, September).
Beyond accuracy: evaluating recommender systems by coverage and serendipity.
In Proceedings of the fourth ACM conference on Recommender systems (pp. 257-260). ACM.
"""
sampling = random.choices(predicted, k=k)
predicted_flattened = [p for sublist in sampling for p in sublist]
L_predictions = len(set(predicted_flattened))
catalog_coverage = round(L_predictions/(len(catalog)*1.0)*100,2)
return catalog_coverage
def _ark(actual, predicted, k=10):
"""
Computes the average recall at k.
Parameters
----------
actual : list
A list of actual items to be predicted
predicted : list
An ordered list of predicted items
k : int, default = 10
Number of predictions to consider
Returns:
-------
score : float
The average recall at k.
"""
if len(predicted)>k:
predicted = predicted[:k]
score = 0.0
num_hits = 0.0
for i,p in enumerate(predicted):
if p in actual and p not in predicted[:i]:
num_hits += 1.0
score += num_hits / (i+1.0)
if not actual:
return 0.0
return score / len(actual)
def mark(actual, predicted, k=10):
"""
Computes the mean average recall at k.
Parameters
----------
actual : a list of lists
Actual items to be predicted
example: [['A', 'B', 'X'], ['A', 'B', 'Y']]
predicted : a list of lists
Ordered predictions
example: [['X', 'Y', 'Z'], ['X', 'Y', 'Z']]
Returns:
-------
mark: float
The mean average recall at k (mar@k)
"""
return np.mean([_ark(a,p,k) for a,p in zip(actual, predicted)])
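# Usage sketch: with k=3, each user's list is scored by _ark and the scores averaged.
# mark([['A', 'B', 'X'], ['A', 'B', 'Y']], [['X', 'Y', 'Z'], ['X', 'Y', 'Z']], k=3)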
def personalization(predicted):
"""
Personalization measures recommendation similarity across users.
A high score indicates good personalization (user's lists of recommendations are different).
A low score indicates poor personalization (user's lists of recommendations are very similar).
A model is "personalizing" well if the set of recommendations for each user is different.
Parameters:
----------
predicted : a list of lists
Ordered predictions
example: [['X', 'Y', 'Z'], ['X', 'Y', 'Z']]
Returns:
-------
The personalization score for all recommendations.
"""
def make_rec_matrix(predicted):
df = pd.DataFrame(data=predicted).reset_index().melt(
id_vars='index', value_name='item',
)
df = df[['index', 'item']].pivot(index='index', columns='item', values='item')
df = pd.notna(df)*1
rec_matrix = sp.csr_matrix(df.values)
return rec_matrix
#create matrix for recommendations
predicted = np.array(predicted)
rec_matrix_sparse = make_rec_matrix(predicted)
#calculate similarity for every user's recommendation list
similarity = cosine_similarity(X=rec_matrix_sparse, dense_output=False)
#get indicies for upper right triangle w/o diagonal
upper_right = np.triu_indices(similarity.shape[0], k=1)
#calculate average similarity
personalization = np.mean(similarity[upper_right])
return 1-personalization
def _single_list_similarity(predicted, feature_df, u):
"""
Computes the intra-list similarity for a single list of recommendations.
Parameters
----------
predicted : a list
Ordered predictions
Example: ['X', 'Y', 'Z']
feature_df: dataframe
A dataframe with one hot encoded or latent features.
The dataframe should be indexed by the id used in the recommendations.
Returns:
-------
ils_single_user: float
The intra-list similarity for a single list of recommendations.
"""
# exception predicted list empty
if not predicted:
raise Exception('Predicted list is empty, index: {0}'.format(u))
#get features for all recommended items
recs_content = feature_df.loc[predicted]
recs_content = recs_content.dropna()
recs_content = sp.csr_matrix(recs_content.values)
#calculate similarity scores for all items in list
similarity = cosine_similarity(X=recs_content, dense_output=False)
#get indicies for upper right triangle w/o diagonal
upper_right = np.triu_indices(similarity.shape[0], k=1)
#calculate average similarity score of all recommended items in list
ils_single_user = np.mean(similarity[upper_right])
return ils_single_user
def intra_list_similarity(predicted, feature_df):
"""
Computes the average intra-list similarity of all recommendations.
This metric can be used to measure diversity of the list of recommended items.
Parameters
----------
predicted : a list of lists
Ordered predictions
Example: [['X', 'Y', 'Z'], ['X', 'Y', 'Z']]
feature_df: dataframe
A dataframe with one hot encoded or latent features.
The dataframe should be indexed by the id used in the recommendations.
Returns:
-------
The average intra-list similarity for recommendations.
"""
feature_df = feature_df.fillna(0)
Users = range(len(predicted))
ils = [_single_list_similarity(predicted[u], feature_df, u) for u in Users]
return np.mean(ils)
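# Usage sketch (hypothetical one-hot feature frame indexed by item id):
# feature_df = pd.DataFrame({'action': [1, 0, 1], 'comedy': [0, 1, 1]}, index=['X', 'Y', 'Z'])
# intra_list_similarity([['X', 'Y'], ['Y', 'Z']], feature_df)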
def mse(y, yhat):
"""
Computes the mean square error (MSE)
Parameters
----------
yhat : Series or array. Reconstructed (predicted) ratings or interaction values.
y: original true ratings or interaction values.
Returns:
-------
The mean square error (MSE)
"""
mse = mean_squared_error(y, yhat)
return mse
def rmse(y, yhat):
"""
Computes the root mean square error (RMSE)
Parameters
----------
yhat : Series or array. Reconstructed (predicted) ratings or values
y: original true ratings or values.
Returns:
-------
The root mean square error (RMSE)
"""
rmse = sqrt(mean_squared_error(y, yhat))
return rmse
def make_confusion_matrix(y, yhat):
"""
Calculates and plots a confusion matrix
Parameters
----------
y : list or array of actual interaction values such as ratings
yhat: list or array of predicted interaction values
Returns:
-------
A confusion matrix plot
"""
cm = confusion_matrix(y, yhat, labels=[1,0])
cm = np.round(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis],4)*100
fmt = ".2f"
thresh = cm.max() / 2.
descriptions = np.array([["True Positive", "False Negative"], ["False Positive", "True Negative"]])
colors = np.array([["green", "red"], ["red", "green"]])
plt.imshow([[0,0],[0,0]], interpolation='nearest', cmap=plt.cm.Greys)
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt)+'%\n' + descriptions[i, j],
horizontalalignment="center",
color=colors[i,j])
plt.axhline(y=0.5, xmin=0, xmax=1, color="black", linewidth=0.75)
plt.axvline(x=0.5, ymin=0, ymax=1, color="black", linewidth=0.75)
plt.ylabel('True')
plt.xlabel('Predicted')
plt.title("Confusion Matrix")
plt.xticks([0,1], [1,0], rotation=45)
plt.yticks([0,1], [1,0])
plt.show()
def recommender_precision(predicted, actual):
"""
Computes the precision of each user's list of recommendations, and averages precision over all users.
----------
actual : a list of lists
Actual items to be predicted
example: [['A', 'B', 'X'], ['A', 'B', 'Y']]
predicted : a list of lists
Ordered predictions
example: [['X', 'Y', 'Z'], ['X', 'Y', 'Z']]
Returns:
-------
precision: float
"""
def calc_precision(predicted, actual):
prec = [value for value in predicted if value in actual]
prec = np.round(float(len(prec)) / float(len(predicted)), 4)
return prec
precision = np.mean(list(map(calc_precision, predicted, actual)))
return precision
def recommender_recall(predicted, actual):
"""
Computes the recall of each user's list of recommendations, and averages recall over all users.
----------
actual : a list of lists
Actual items to be predicted
example: [['A', 'B', 'X'], ['A', 'B', 'Y']]
predicted : a list of lists
Ordered predictions
example: [['X', 'Y', 'Z'], ['X', 'Y', 'Z']]
Returns:
-------
recall: float
"""
def calc_recall(predicted, actual):
reca = [value for value in predicted if value in actual]
reca = np.round(float(len(reca)) / float(len(actual)), 4)
return reca
recall = np.mean(list(map(calc_recall, predicted, actual)))
return recall
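# Quick self-check sketch (toy data, not part of the original module):
if __name__ == "__main__":
    actual = [['A', 'B', 'X'], ['A', 'B', 'Y']]
    predicted = [['X', 'Y', 'Z'], ['X', 'B', 'Z']]
    print(recommender_precision(predicted, actual))  # 0.3333: each user hits 1 of 3 recommended items
    print(recommender_recall(predicted, actual))     # 0.3333: each user recovers 1 of 3 relevant items
    print(mark(actual, predicted, k=3))              # 0.25: rank-weighted recall averaged over users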
|
#from nlplingo.tasks.sequence.run import find_token_indices_of_markers, remove_tokens_at_indices
from nlplingo.tasks.sequence.utils import find_token_indices_of_markers, remove_tokens_at_indices
from nlplingo.oregon.event_models.uoregon.tools.xlmr import xlmr_tokenizer
from nlplingo.oregon.event_models.uoregon.tools.utils import *
upos_map = {'[PAD]': 0, '[UNK]': 1, 'ADJ': 2, 'ADP': 3, 'ADV': 4, 'AUX': 5, 'CCONJ': 6, 'DET': 7, 'INTJ': 8, 'NOUN': 9,
'NUM': 10, 'PART': 11, 'PRON': 12, 'PROPN': 13, 'PUNCT': 14, 'SCONJ': 15, 'SYM': 16, 'VERB': 17, 'X': 18}
#upos_map = {"[PAD]": 0, "[UNK]": 1, "ADP": 2, "DET": 3, "NOUN": 4, "PROPN": 5, "NUM": 6, "PUNCT": 7, "VERB": 8,
# "ADJ": 9, "PRON": 10, "ADV": 11, "AUX": 12, "PART": 13, "CCONJ": 14, "SCONJ": 15, "INTJ": 16, "SYM": 17,
# "X": 18}
# xpos_map = {"[PAD]": 0, "[UNK]": 1, "IN": 2, "DT": 3, "NN": 4, "NNP": 5, "CD": 6, ",": 7, "VBD": 8, "NNS": 9, "JJ": 10,
# "PRP": 11, "RB": 12, "VBN": 13, "WDT": 14, "VBG": 15, "TO": 16, "VB": 17, "RP": 18, ".": 19, "CC": 20,
# "EX": 21, "POS": 22, "WP": 23, "PRP$": 24, "HYPH": 25, "WRB": 26, "VBZ": 27, "JJR": 28, "MD": 29, "VBP": 30,
# "''": 31, "``": 32, ":": 33, "NNPS": 34, "JJS": 35, "-LRB-": 36, "-RRB-": 37, "PDT": 38, "UH": 39,
# "RBR": 40, "RBS": 41, "$": 42, "FW": 43, "ADD": 44, "WP$": 45, "SYM": 46, "LS": 47, "NFP": 48, "AFX": 49}
# following xpos based on stanfordnlp annotations on SerifXML pos_sequence
xpos_map = {'[PAD]': 0, '[UNK]': 1, '``': 2, ',': 3, ':': 4, '.': 5, "''": 6, '$': 7, 'ADD': 8, 'AFX': 9, 'CC': 10,
'CD': 11, 'DT': 12, 'EX': 13, 'FW': 14, 'HYPH': 15, 'IN': 16, 'JJ': 17, 'JJR': 18, 'JJS': 19, '-LRB-': 20,
'LS': 21, 'MD': 22, 'NFP': 23, 'NN': 24, 'NNP': 25, 'NNPS': 26, 'NNS': 27, 'PDT': 28, 'POS': 29, 'PRP': 30,
'PRP$': 31, 'RB': 32, 'RBR': 33, 'RBS': 34, 'RP': 35, '-RRB-': 36, 'SYM': 37, 'TO': 38, 'UH': 39, 'VB': 40,
'VBD': 41, 'VBG': 42, 'VBN': 43, 'VBP': 44, 'VBZ': 45, 'WDT': 46, 'WP': 47, 'WP$': 48, 'WRB': 49}
# following xpos based on original SerifXML pos-tags from parse tree
# xpos_map = {'[PAD]': 0, '[UNK]': 1, '``': 2, ',': 3, ':': 4, '.': 5, "''": 6, '$': 7, 'CC': 8, 'CD': 9, 'DATE-NNP': 10,
# 'DATE-NNPS': 11, 'DT': 12, 'EX': 13, 'FW': 14, 'IN': 15, 'JJ': 16, 'JJR': 17, 'JJS': 18, '-LRB-': 19,
# 'LS': 20, 'MD': 21, 'NN': 22, 'NNP': 23, 'NNPS': 24, 'NNS': 25, 'PDT': 26, 'POS': 27, 'PRP': 28, 'PRP$': 29,
# 'RB': 30, 'RBR': 31, 'RBS': 32, 'RP': 33, '-RRB-': 34, 'TO': 35, 'UH': 36, 'VB': 37, 'VBD': 38, 'VBG': 39,
# 'VBN': 40, 'VBP': 41, 'VBZ': 42, 'WDT': 43, 'WP': 44, 'WP$': 45, 'WRB': 46}
ner_map = {"[PAD]": 0, "[UNK]": 1, "O": 2,
"B-DATE": 3, "I-DATE": 4, "E-DATE": 5, "S-DATE": 6,
"B-CARDINAL": 7, "I-CARDINAL": 1, "E-CARDINAL": 1, "S-CARDINAL": 1,
"B-PERSON": 1, "I-PERSON": 1, "E-PERSON": 1, "S-PERSON": 1,
"B-EVENT": 1, "I-EVENT": 1, "E-EVENT": 1, "S-EVENT": 1,
"B-QUANTITY": 1, "I-QUANTITY": 1, "E-QUANTITY": 1, "S-QUANTITY": 1,
"B-TIME": 1, "I-TIME": 1, "E-TIME": 1, "S-TIME": 1,
"B-ORG": 1, "I-ORG": 1, "E-ORG": 1, "S-ORG": 1,
"B-GPE": 1, "I-GPE": 1, "E-GPE": 1, "S-GPE": 1,
"B-NORP": 1, "I-NORP": 1, "E-NORP": 1, "S-NORP": 1,
"B-WORK_OF_ART": 1, "I-WORK_OF_ART": 1, "E-WORK_OF_ART": 1, "S-WORK_OF_ART": 1,
"B-ORDINAL": 1, "I-ORDINAL": 1, "E-ORDINAL": 1, "S-ORDINAL": 1,
"B-FAC": 1, "I-FAC": 1, "E-FAC": 1, "S-FAC": 1,
"B-PRODUCT": 1, "I-PRODUCT": 1, "E-PRODUCT": 1, "S-PRODUCT": 1,
"B-LOC": 1, "I-LOC": 1, "E-LOC": 1, "S-LOC": 1,
"B-PERCENT": 1, "I-PERCENT": 1, "E-PERCENT": 1, "S-PERCENT": 1,
"B-MONEY": 1, "I-MONEY": 1, "E-MONEY": 1, "S-MONEY": 1,
"B-LANGUAGE": 1, "I-LANGUAGE": 1, "E-LANGUAGE": 1, "S-LANGUAGE": 1,
"B-LAW": 1, "E-LAW": 1, "I-LAW": 1, "S-LAW": 1}
# xpos_upos_map = {'[PAD]': '[PAD]', '[UNK]': '[UNK]', '``': 'PUNCT', ',': 'PUNCT', ':': 'PUNCT', '.': 'PUNCT',
# "''": 'PUNCT', '$': 'SYM', 'CC': 'CCONJ', 'CD': 'NUM', 'DATE-NNP': 'PROPN', 'DATE-NNPS': 'PROPN',
# 'DT': 'DET', 'EX': 'PRON', 'FW': 'X', 'IN': 'ADP', 'JJ': 'ADJ', 'JJR': 'ADJ', 'JJS': 'ADJ',
# '-LRB-': 'PUNCT', 'LS': 'NOUN', 'MD': 'AUX', 'NN': 'NOUN', 'NNP': 'PROPN', 'NNPS': 'PROPN',
# 'NNS': 'NOUN', 'PDT': 'DET', 'POS': 'PART', 'PRP': 'PRON', 'PRP$': 'PRON', 'RB': 'ADV', 'RBR': 'ADV',
# 'RBS': 'ADV', 'RP': 'ADP', '-RRB-': 'PUNCT', 'TO': 'PART', 'UH': 'INTJ', 'VB': 'VERB', 'VBD': 'VERB',
# 'VBG': 'VERB', 'VBN': 'VERB', 'VBP': 'AUX', 'VBZ': 'AUX', 'WDT': 'PRON', 'WP': 'PRON', 'WP$': 'PRON',
# 'WRB': 'ADV'}
deprel_map = {"[PAD]": 0, "[UNK]": 1, 'acl': 2, 'advcl': 3, 'advmod': 4, 'amod': 5, 'appos': 6, 'aux': 7, 'case': 8,
'cc': 9, 'ccomp': 10, 'compound': 11, 'conj': 12, 'cop': 13, 'csubj': 14, 'det': 15, 'discourse': 16,
'expl': 17, 'fixed': 18, 'flat': 19, 'goeswith': 20, 'iobj': 21, 'list': 22, 'mark': 23, 'nmod': 24,
'nsubj': 25, 'nummod': 26, 'obj': 27, 'obl': 28, 'orphan': 29, 'parataxis': 30, 'punct': 31, 'root': 32,
'vocative': 33, 'xcomp': 34}
# def map_xpos_to_upos(xpos):
# return xpos_upos_map.get(xpos, xpos_upos_map['[UNK]'])
def get_arguments(id2tag, tag_ids, ori_example, seperate_outputs=True):
"""
:type ori_example: nlplingo.tasks.sequence.example.SequenceExample
"""
#ori_text = ori_example['text']
#actual_length = len(ori_example['word'])
# print('id2tag=', id2tag)
# print('ori_example.words=', ori_example.words)
# print('tag_ids=', tag_ids)
"""
id2tag= {0: 'B-AGENT', 1: 'B-PATIENT', 2: 'I-AGENT', 3: 'I-PATIENT', 4: 'O'}
ori_example.words= ['Betancur', 'has', '$$$', 'backed', '$$$', 'the', 'broad', 'new', 'amnesty', 'offer', 'to', 'members', 'of', 'Colombia', "'s", 'guerrilla', 'movements', 'and', 'has', 'won', 'the', 'endorsement', 'of', 'some', 'of', 'the', 'leftist', 'insurgent', 'leaders', 'by', 'offering', 'negotiations', 'and', 'studies', 'of', 'substantial', 'political', 'and', 'electoral', 'reforms', '.']
tag_ids= tensor([0, 4, 4, 4, 4, 4, 4, 1, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 0], device='cuda:0')
The tag_ids are based on unmarked words, so we need to subtract 2 from len(ori_example.words)
"""
actual_length = len(ori_example.words) - 2
tag_ids = tag_ids.long().data.cpu().numpy()
tag_ids = tag_ids[: actual_length]
tags = [id2tag[tag_id] for tag_id in tag_ids]
agent_tags = []
patient_tags = []
for tag in tags:
if 'AGENT' not in tag and 'PATIENT' not in tag:
agent_tags.append('O')
patient_tags.append('O')
else:
sub_tags = tag.split('|')
if len(sub_tags) == 1:
if 'AGENT' in sub_tags[0]:
agent_tags.append(sub_tags[0])
patient_tags.append('O')
else:
agent_tags.append('O')
patient_tags.append(sub_tags[0])
else:
agent_tags.append(sub_tags[0])
patient_tags.append(sub_tags[1])
assert len(agent_tags) == len(patient_tags)
agents = []
patients = []
agent_offsets = {}
patient_offsets = {}
current_agent = []
current_patient = []
for k in range(actual_length):
if len(current_agent) > 0 and (agent_tags[k] == 'O' or agent_tags[k].startswith('B-')):
# start_span = ori_example['span'][current_agent[0]]
# end_span = ori_example['span'][current_agent[-1]]
#
# text_span = ori_text[start_span[0]: end_span[1] + 1]
# agents.append(text_span)
# agent_offsets[text_span] = [start_span[0], end_span[1] + 1]
start_char = ori_example.sentence.tokens[current_agent[0]].start_char_offset()
end_char = ori_example.sentence.tokens[current_agent[-1]].end_char_offset()
text_span = ori_example.sentence.get_text(start_char, end_char)
agents.append(text_span)
agent_offsets[text_span] = [start_char, end_char]
current_agent = []
if 'AGENT' in agent_tags[k]:
current_agent.append(k)
if len(current_patient) > 0 and (patient_tags[k] == 'O' or patient_tags[k].startswith('B-')):
# start_span = ori_example['span'][current_patient[0]]
# end_span = ori_example['span'][current_patient[-1]]
#
# text_span = ori_text[start_span[0]: end_span[1] + 1]
# patients.append(text_span)
# patient_offsets[text_span] = [start_span[0], end_span[1] + 1]
start_char = ori_example.sentence.tokens[current_patient[0]].start_char_offset()
end_char = ori_example.sentence.tokens[current_patient[-1]].end_char_offset()
text_span = ori_example.sentence.get_text(start_char, end_char)
patients.append(text_span)
patient_offsets[text_span] = [start_char, end_char]
current_patient = []
if 'PATIENT' in patient_tags[k]:
current_patient.append(k)
if len(current_agent) > 0:
# start_span = ori_example['span'][current_agent[0]]
# end_span = ori_example['span'][current_agent[-1]]
#
# text_span = ori_text[start_span[0]: end_span[1] + 1]
# agents.append(text_span)
# agent_offsets[text_span] = [start_span[0], end_span[1] + 1]
start_char = ori_example.sentence.tokens[current_agent[0]].start_char_offset()
end_char = ori_example.sentence.tokens[current_agent[-1]].end_char_offset()
text_span = ori_example.sentence.get_text(start_char, end_char)
agents.append(text_span)
agent_offsets[text_span] = [start_char, end_char]
if len(current_patient) > 0:
# start_span = ori_example['span'][current_patient[0]]
# end_span = ori_example['span'][current_patient[-1]]
#
# text_span = ori_text[start_span[0]: end_span[1] + 1]
# patients.append(text_span)
# patient_offsets[text_span] = [start_span[0], end_span[1] + 1]
start_char = ori_example.sentence.tokens[current_patient[0]].start_char_offset()
end_char = ori_example.sentence.tokens[current_patient[-1]].end_char_offset()
text_span = ori_example.sentence.get_text(start_char, end_char)
patients.append(text_span)
patient_offsets[text_span] = [start_char, end_char]
if seperate_outputs:
return agents, patients, agent_offsets, patient_offsets
else:
return agents + patients
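# Decoding sketch (hypothetical tags, to illustrate the BIO grouping above):
# with tags ['B-AGENT', 'I-AGENT', 'O', 'B-PATIENT'] over the unmarked tokens
# ['UN', 'troops', 'attacked', 'rebels'], the loops above would recover
# agents == ['UN troops'] and patients == ['rebels'], plus their character offsets.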
class TriggerGenerator(object):
def __init__(self, opt, xlmr_model, tokenizer, examples, docs, label_map, biw2v_map, is_eval_data=False):
"""
:type tokenizer: python.clever.nlplingo.tasks.sequence.tokenizer.Tokenizer
:type examples: list[nlplingo.tasks.sequence.example.SequenceExample]
:type docs: list[nlplingo.text.text_theory.Document]
:type label_map: dict[str, int]
"""
# self.opt = opt
self.tokenizer = tokenizer
self.biw2v_map = biw2v_map
self.xlmr_model = xlmr_model
self.xlmr_model.eval()
self.is_eval_data = is_eval_data
self.encoded_data = self.encode_data(examples, docs, label_map, False) # In Oregon code, will return: {'english': list, 'arabic': list}
#self.num_examples = len(self.encoded_data['english'] + self.encoded_data['arabic'])
self.num_examples = len(self.encoded_data)
self.data_batches = self.create_batches(self.encoded_data, opt['batch_size'])
self.num_batches = len(self.data_batches)
def encode_trigger_example(self, example, doc, label_map):
"""modeled after: python.clever.event_models.uoregon.models.pipeline._01.iterators.EDIterator.encode_example
In Oregon's code:
#### Before encoding
'triggers': [[21, 22], [34, 35]]
'event-types': ['harmful|material', 'neutral|material']
'word': ['On', 'the', 'afternoon', 'of', 'Oct.', '7', ',', '1974', ',', 'a', 'mob', 'of', '200', 'enraged', 'whites', ',', 'many', 'of', 'them', 'students', ',', 'closed', 'in', 'on', 'a', 'bus', 'filled', 'with', 'black', 'students', 'that', 'was', 'trying', 'to', 'pull', 'away', 'from', 'the', 'local', 'high', 'school', '.']
'lemma': ['on', 'the', 'afternoon', 'of', 'Oct.', '7', ',', '1974', ',', 'a', 'mob', 'of', '200', 'enrage', 'white', ',', 'many', 'of', 'they', 'student', ',', 'close', 'in', 'on', 'a', 'bus', 'fill', 'with', 'black', 'student', 'that', 'be', 'try', 'to', 'pull', 'away', 'from', 'the', 'local', 'high', 'school', '.']
'upos': ['ADP', 'DET', 'NOUN', 'ADP', 'PROPN', 'NUM', 'PUNCT', 'NUM', 'PUNCT', 'DET', 'NOUN', 'ADP', 'NUM', 'VERB', 'NOUN', 'PUNCT', 'ADJ', 'ADP', 'PRON', 'NOUN', 'PUNCT', 'VERB', 'ADV', 'ADP', 'DET', 'NOUN', 'VERB', 'ADP', 'ADJ', 'NOUN', 'PRON', 'AUX', 'VERB', 'PART', 'VERB', 'ADP', 'ADP', 'DET', 'ADJ', 'ADJ', 'NOUN', 'PUNCT']
'xpos': ['IN', 'DT', 'NN', 'IN', 'NNP', 'CD', ',', 'CD', ',', 'DT', 'NN', 'IN', 'CD', 'VBD', 'NNS', ',', 'JJ', 'IN', 'PRP', 'NNS', ',', 'VBD', 'RB', 'IN', 'DT', 'NN', 'VBN', 'IN', 'JJ', 'NNS', 'WDT', 'VBD', 'VBG', 'TO', 'VB', 'RP', 'IN', 'DT', 'JJ', 'JJ', 'NN', '.']
'morph': ['_', 'Definite=Def|PronType=Art', 'Number=Sing', '_', 'Number=Sing', 'NumType=Card', '_', 'NumType=Card', '_', 'Definite=Ind|PronType=Art', 'Number=Sing', '_', 'NumType=Card', 'Tense=Past|VerbForm=Fin', 'Number=Plur', '_', 'Degree=Pos', '_', 'Case=Acc|Number=Plur|Person=3|PronType=Prs', 'Number=Plur', '_', 'Mood=Ind|Tense=Past|VerbForm=Fin', '_', '_', 'Definite=Ind|PronType=Art', 'Number=Sing', 'Tense=Past|VerbForm=Part', '_', 'Degree=Pos', 'Number=Plur', 'PronType=Rel', 'Mood=Ind|Number=Sing|Person=3|Tense=Past|VerbForm=Fin', 'Tense=Pres|VerbForm=Part', '_', 'VerbForm=Inf', '_', '_', 'Definite=Def|PronType=Art', 'Degree=Pos', 'Degree=Pos', 'Number=Sing', '_']
'head': [3, 3, 14, 5, 3, 5, 5, 5, 14, 11, 14, 13, 11, 0, 14, 15, 15, 19, 17, 17, 22, 14, 22, 26, 26, 22, 26, 30, 30, 27, 33, 33, 30, 35, 33, 35, 41, 41, 41, 41, 35, 14]
'dep_rel': ['case', 'det', 'obl', 'case', 'nmod', 'nummod', 'punct', 'nummod', 'punct', 'det', 'nsubj', 'case', 'nmod', 'root', 'obj', 'punct', 'appos', 'case', 'nmod', 'nmod', 'punct', 'conj', 'advmod', 'case', 'det', 'obl', 'acl', 'case', 'amod', 'obl', 'nsubj', 'aux', 'acl', 'mark', 'xcomp', 'compound', 'case', 'det', 'amod', 'amod', 'obl', 'punct']
'ner': ['O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O']
'span': [[0, 1], [3, 5], [7, 15], [17, 18], [20, 23], [25, 25], [26, 26], [28, 31], [32, 32], [34, 34], [36, 38], [40, 41], [43, 45], [47, 53], [55, 60], [61, 61], [63, 66], [68, 69], [71, 74], [76, 83], [84, 84], [86, 91], [93, 94], [96, 97], [99, 99], [101, 103], [105, 110], [112, 115], [117, 121], [123, 130], [132, 135], [137, 139], [141, 146], [148, 149], [151, 154], [156, 159], [161, 164], [166, 168], [170, 174], [176, 179], [181, 186], [187, 187]]
#### After encoding
xlmr_ids:
tensor([ 0, 2161, 70, 157109, 111, 33649, 5, 361, 6,
4, 27898, 6, 4, 10, 81158, 111, 1781, 22,
29838, 71, 35011, 7, 6, 4, 5941, 111, 2856,
25921, 6, 4, 155738, 23, 98, 10, 5324, 152382,
678, 22556, 25921, 450, 509, 31577, 47, 50065, 16065,
1295, 70, 4000, 11192, 10696, 6, 5, 2])
biw2v_ids: len=42, [11, 3, 2078, 5, 2331, 244, 4, 3939, 4, 10, 12556, 5, 1962, 25452, 17003, 4, 224, 5, 148, 1638, 4, 1393, 9, 11, 10, 2610, 5094, 19, 3225, 1638, 14, 24, 1931, 8, 5169, 1828, 30, 3, 245, 210, 1096, 6]
retrieve_ids, len=42, [1, 2, 3, 4, 5, 7, 9, 10, 12, 13, 14, 15, 16, 17, 20, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 51]
upos_ids, len=42, [2, 3, 4, 2, 5, 6, 7, 6, 7, 3, 4, 2, 6, 8, 4, 7, 9, 2, 10, 4, 7, 8, 11, 2, 3, 4, 8, 2, 9, 4, 10, 12, 8, 13, 8, 2, 2, 3, 9, 9, 4, 7]
xpos_ids, len=42, [2, 3, 4, 2, 5, 6, 7, 6, 7, 3, 4, 2, 6, 8, 9, 7, 10, 2, 11, 9, 7, 8, 12, 2, 3, 4, 13, 2, 10, 9, 14, 8, 15, 16, 17, 18, 2, 3, 10, 10, 4, 19]
head_ids, len=42, [3, 3, 14, 5, 3, 5, 5, 5, 14, 11, 14, 13, 11, 0, 14, 15, 15, 19, 17, 17, 22, 14, 22, 26, 26, 22, 26, 30, 30, 27, 33, 33, 30, 35, 33, 35, 41, 41, 41, 41, 35, 14]
dep_rel_ids, len=42, [2, 3, 4, 2, 5, 6, 7, 6, 7, 3, 8, 2, 5, 9, 10, 7, 11, 2, 5, 5, 7, 12, 13, 2, 3, 4, 14, 2, 15, 4, 8, 16, 14, 17, 18, 19, 2, 3, 15, 15, 4, 7]
ner_ids, len=42, [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
lang_weight 1.0
ED_labels, len=42, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 0, 0, 0, 0, 0, 0, 0]
:type example: nlplingo.tasks.sequence.example.SequenceExample
:type doc: nlplingo.text.text_theory.Document
:type label_map: dict[str, int]
"""
word_list = example.words
ED_labels = [label_map[label_string] for label_string in example.labels]
xlmr_ids, input_mask, label_ids, retrieve_ids, subwords = self.tokenizer.tokenize_to_ids(word_list, ED_labels)
#xlmr_ids, retrieve_ids = xlmr_tokenizer.get_token_ids(self.xlmr_model, word_list)
xpos_ids = [xpos_map.get(token.pos_tag, xpos_map['[UNK]']) for token in example.sentence.tokens]
upos_ids = [upos_map.get(token.pos_tag_alternate, upos_map['[UNK]']) for token in example.sentence.tokens]
#upos_ids = [upos_map[map_xpos_to_upos(token.pos_tag)] for token in example.sentence.tokens]
#print('len(doc.adj_mats)=', len(doc.adj_mats))
#print('example.sentence_index=', example.sentence_index)
head_ids = [token.dep_relations[0].connecting_token_index for token in example.sentence.tokens]
dep_rel_ids = [deprel_map.get(token.dep_relations[0].dep_name, deprel_map['[UNK]']) for token in example.sentence.tokens]
# head_ids = doc.adj_mats[example.sentence_index][0]
# dep_rel_ids = doc.adj_mats[example.sentence_index][1]
ner_list = ['O'] * len(word_list)
ner_ids = [ner_map.get(ner, ner_map['[UNK]']) for ner in ner_list]  # fall back to [UNK] for unseen tags
lang_weight = 1.0
#biw2v_ids = [1] * len(word_list) # TODO
biw2v_ids = [self.biw2v_map.get(word.lower(), self.biw2v_map[UNK_TOKEN]) for word in word_list]
print('example.words=', example.words)
print('example.labels=', example.labels)
#print('subwords=', subwords)
print('xlmr_ids=', xlmr_ids)
#print('input_mask=', input_mask)
#print('label_ids=', label_ids)
#print('biw2v_ids=', biw2v_ids)
print('retrieve_ids=', retrieve_ids)
#print('upos_ids=', upos_ids)
#print('xpos_ids=', xpos_ids)
#print('head_ids=', head_ids)
#print('dep_rel_ids=', dep_rel_ids)
#print('ner_ids=', ner_ids)
#print('lang_weight=', lang_weight)
#print('ED_labels=', ED_labels)
#sys.exit(0)
return xlmr_ids, input_mask, label_ids, biw2v_ids, retrieve_ids, upos_ids, xpos_ids, head_ids, dep_rel_ids, ner_ids, lang_weight, ED_labels
def encode_data(self, examples, docs, label_map, is_eval_data):
""" For each example in examples, "generate features" to become encoded_ex. Then shuffle if not eval data
:type examples: list[nlplingo.tasks.sequence.example.SequenceExample]
:type docs: list[nlplingo.text.text_theory.Document]
:type is_eval_data: bool
"""
docid_to_doc = dict()
for doc in docs:
docid_to_doc[doc.docid] = doc
encoded_examples = []
for example in examples:
encoded_ex = self.encode_trigger_example(example, docid_to_doc[example.docid], label_map)
encoded_examples.append(encoded_ex)
if not is_eval_data:
encoded_examples = shuffle_list(encoded_examples)
return encoded_examples
def create_batches(self, examples, batch_size):
""" modified from uoregon code, where I assume all examples are English
If: l = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
Then batches = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
"""
batches = [examples[i:i + batch_size] for i in range(0, len(examples), batch_size)]
return batches
def __len__(self):
return len(self.data_batches)
def __getitem__(self, key):
""" Get a batch with index. """
if not isinstance(key, int):
raise TypeError
if key < 0 or key >= len(self.data_batches):
raise IndexError
batch = self.data_batches[key]
batch_size = len(batch)
# E.g. if: l = [('1a','1b'),('2a','2b'),('3a','3b'),('4a','4b')]
# zip(*l): [('1a', '2a', '3a', '4a'), ('1b', '2b', '3b', '4b')]
batch = list(zip(*batch))
lens = [len(x) for x in batch[0]] # batch[0] is list[xlmr_ids], so this gives: [len(xlmr_ids) for each example]
batch, _ = sort_all(batch, lens) # sort elements in batch by decreasing order of their len
# convert to tensors
#xlmr_ids = do_padding(batch[0], batch_size)
#xlmr_ids = do_padding(batch[0], batch_size, pad_id=self.tokenizer.pad_token)
xlmr_ids = do_padding(batch[0], batch_size)
input_mask = do_padding(batch[1], batch_size)
label_ids = do_padding(batch[2], batch_size, pad_id=self.tokenizer.pad_token_label_id)
biw2v_ids = do_padding(batch[3], batch_size)
# ***********************************************
retrieve_ids = do_padding(batch[4], batch_size)
upos_ids = do_padding(batch[5], batch_size)
xpos_ids = do_padding(batch[6], batch_size)
head_ids = do_padding(batch[7], batch_size)
deprel_ids = do_padding(batch[8], batch_size)
ner_ids = do_padding(batch[9], batch_size)
lang_weights = torch.Tensor(batch[10])
ED_labels = do_padding(batch[11], batch_size)
# If: retrieve_ids = torch.LongTensor([1, 2, 3, 0, 5])
# Then: torch.eq(retrieve_ids, 0) produces: tensor([False, False, False, True, False])
pad_masks = torch.eq(retrieve_ids, 0)
print('xlmr_ids=', xlmr_ids)
print('input_mask=', input_mask)
print('retrieve_ids=', retrieve_ids)
print('ED_labels=', ED_labels)
print('pad_masks=', pad_masks)
return (
xlmr_ids, input_mask, label_ids, biw2v_ids, retrieve_ids, upos_ids, xpos_ids, head_ids, deprel_ids, ner_ids, lang_weights,
ED_labels,
pad_masks)
def __iter__(self):
for i in range(self.__len__()):
yield self.__getitem__(i) # yield a batch
def shuffle_batches(self):
indices = list(range(len(self.data_batches)))
random.shuffle(indices)
self.data_batches = [self.data_batches[i] for i in indices]
class ArgumentGenerator:
def __init__(self, opt, xlmr_model, examples, docs, label_map, biw2v_map, is_eval_data=False):
# self.opt = opt
self.biw2v_map = biw2v_map
self.xlmr_model = xlmr_model
self.xlmr_model.eval()
self.is_eval_data = is_eval_data
# self.data_path = data_path
self.id2ori_example = {}
self.id2tag = {v: k for k, v in ARGUMENT_TAG_MAP.items()}
self.encoded_data = self.encode_data(examples, docs, label_map, False)
#self.num_examples = len(self.encoded_data['english'] + self.encoded_data['arabic'])
self.num_examples = len(self.encoded_data)
self.data_batches = self.create_batches(self.encoded_data, opt['batch_size'])
self.num_batches = len(self.data_batches)
def encode_argument_example(self, example, doc, label_map):
""" modeled after: python.clever.event_models.uoregon.models.pipeline._01.iterators.ArgumentIterator.encode_example
example of "span": [[0, 7], [9, 17], [19, 20], [22, 27], ...
There are as many sublists as there are tokens. Each sublist refers to the [starting-char, ending-char] of each token
"text": raw text string of the sentence
example of "trigger": [13]
example of "agents": [[0, 1]]
example of "patients": [[20], [28, 29]]
###############
example['word'] ['A', 'number', 'of', 'National', 'Football', 'League', '(', 'NFL', ')', 'players', 'have', 'protested', 'after', 'being', 'attacked', 'by', 'the', 'US', 'president', '.']
example['upos'] ['DET', 'NOUN', 'ADP', 'PROPN', 'PROPN', 'PROPN', 'PUNCT', 'PROPN', 'PUNCT', 'NOUN', 'AUX', 'VERB', 'SCONJ', 'AUX', 'VERB', 'ADP', 'DET', 'PROPN', 'PROPN', 'PUNCT']
example['xpos'] ['DT', 'NN', 'IN', 'NNP', 'NNP', 'NNP', '-LRB-', 'NNP', '-RRB-', 'NNS', 'VBP', 'VBN', 'IN', 'VBG', 'VBN', 'IN', 'DT', 'NNP', 'NNP', '.']
example['head'] [2, 12, 6, 6, 6, 2, 8, 6, 8, 12, 12, 0, 15, 15, 12, 19, 19, 19, 15, 12]
example['dep_rel'] ['det', 'nsubj', 'case', 'compound', 'compound', 'nmod', 'punct', 'appos', 'punct', 'nsubj', 'aux', 'root', 'mark', 'aux', 'advcl', 'case', 'det', 'compound', 'obl', 'punct']
example['ner'] ['O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O']
lang_weight 1.0
trigger_toks [11]
trigger_word protested
###############
xlmr_ids tensor([ 0, 62, 14012, 111, 9907, 98809, 19175, 15, 186831,
1388, 92865, 765, 18782, 297, 7103, 8035, 52875, 297,
390, 70, 7082, 13918, 6, 5, 2, 2, 18782,
297, 2])
NOTE: the trailing [2, 18782, 297, 2] comes from the trigger word "protested"
biw2v_ids [10, 166, 5, 76, 5067, 1764, 16, 105158, 17, 5313, 40, 7923, 125, 200, 2781, 18, 3, 99, 84, 6]
retrieve_ids [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 15, 16, 18, 19, 20, 21, 23]
upos_ids [3, 4, 2, 5, 5, 5, 7, 5, 7, 4, 12, 8, 15, 12, 8, 2, 3, 5, 5, 7]
xpos_ids [3, 4, 2, 5, 5, 5, 36, 5, 37, 9, 30, 13, 2, 15, 13, 2, 3, 5, 5, 19]
head_ids [2, 12, 6, 6, 6, 2, 8, 6, 8, 12, 12, 0, 15, 15, 12, 19, 19, 19, 15, 12]
dep_rel_ids [3, 8, 2, 19, 19, 5, 7, 11, 7, 8, 16, 9, 17, 16, 26, 2, 3, 19, 4, 7]
ner_ids [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
lang_weight 1.0
trigger_tok 11
entity_tags [4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 3, 3, 3, 6, 3, 3, 3, 3, 3]
eid 0
"""
# example.words is marked up, like so: w0 w1 w2 $$$ w3 w4 $$$$ w5 w6,
# where '$$$' surrounds the anchor span
# We find the indices of the markers. In the above example, we should get: [3, 6]
marker_indices = find_token_indices_of_markers(example.words)
assert len(marker_indices) == 2
words_without_markers = remove_tokens_at_indices(example.words, marker_indices)
labels_without_markers = remove_tokens_at_indices(example.labels, marker_indices)
anchor_start_token_index = marker_indices[0]
anchor_end_token_index = marker_indices[1] - 2
# Why is there a -2 in the above? Assume that you have the following marked up word sequence:
# 0 1 2 3 4 5 6 7 8 (token indices of marked up sequence)
# 0 1 2 $$$ 3 4 $$$ 5 6 (original token indices)
# On the above:
# * marker_indices[0] == 3
# * marker_indices[1] == 6 (you need to minus by 2, to get 4, which is the original token index)
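        # Concretely, for the sketch above (hypothetical tokens):
        #   example.words  == ['w0', 'w1', 'w2', '$$$', 'w3', 'w4', '$$$', 'w5', 'w6']
        #   marker_indices == [3, 6]
        # After removing both markers, 'w3' sits at index 3 and 'w4' at index
        # 6 - 2 == 4, so the anchor span covers original token indices 3..4.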
trigger_word = words_without_markers[anchor_start_token_index]
word_list = words_without_markers
entity_tags = [label_map[label_string] for label_string in labels_without_markers]
xlmr_ids, retrieve_ids = xlmr_tokenizer.get_token_ids(self.xlmr_model, word_list, trigger_word) # 'trigger_word' is raw text string of the trigger
xpos_ids = [xpos_map.get(token.pos_tag, xpos_map['[UNK]']) for token in example.sentence.tokens]
upos_ids = [upos_map.get(token.pos_tag_alternate, upos_map['[UNK]']) for token in example.sentence.tokens]
#upos_ids = [upos_map[map_xpos_to_upos(token.pos_tag)] for token in example.sentence.tokens]
head_ids = [token.dep_relations[0].connecting_token_index for token in example.sentence.tokens]
dep_rel_ids = [deprel_map.get(token.dep_relations[0].dep_name, deprel_map['[UNK]']) for token in example.sentence.tokens]
# head_ids = doc.adj_mats[example.sentence_index][0]
# dep_rel_ids = doc.adj_mats[example.sentence_index][1]
ner_list = ['O'] * len(word_list)
ner_ids = [ner_map.get(ner) for ner in ner_list]
lang_weight = 1.0
#biw2v_ids = [1] * len(word_list) # TODO
biw2v_ids = [self.biw2v_map.get(word.lower(), self.biw2v_map[UNK_TOKEN]) for word in word_list]
#######
# word_list = example['word']
# upos_list = example['upos']
# xpos_list = example['xpos']
# head_list = example['head']
# dep_rel_list = example['dep_rel']
# ner_list = example['ner']
# lang_weight = 1.0 if opt['co_train_lambda'] == 0 or example['lang'] == 'english' else opt['co_train_lambda']
# # *****************************
# trigger_toks = example['trigger']
#
# # get raw text string of the trigger
# trigger_word = example['text'][
# example['span'][int(trigger_toks[0])][0]: example['span'][int(trigger_toks[-1])][1] + 1]
#
# xlmr_ids, retrieve_ids = xlmr_tokenizer.get_token_ids(self.xlmr_model, word_list, trigger_word)
# # ****** biw2v ************
# biw2v_ids = [biw2v_map.get(word.lower(), biw2v_map[UNK_TOKEN]) for word in word_list]
# # *****************************
# upos_ids = [upos_map.get(upos, upos_map[UNK_TOKEN]) for upos in upos_list]
# xpos_ids = [xpos_map.get(xpos, xpos_map[UNK_TOKEN]) for xpos in xpos_list]
# head_ids = head_list
# dep_rel_ids = [deprel_map.get(dep_rel, deprel_map[UNK_TOKEN]) for dep_rel in dep_rel_list]
# ner_ids = [ner_map.get(ner, ner_map[UNK_TOKEN]) for ner in ner_list]
#
# entity_tags = get_bio_tags(
# agent_extents=example['agents'],
# patient_extents=example['patients'],
# word_list=word_list
# )
# trigger_tok = example['trigger'][0]
#
eid = len(self.id2ori_example)
self.id2ori_example[eid] = example
return xlmr_ids, biw2v_ids, retrieve_ids, upos_ids, xpos_ids, head_ids, dep_rel_ids, ner_ids, lang_weight, anchor_start_token_index, entity_tags, eid
def encode_data(self, examples, docs, label_map, is_eval_data):
""" For each example in examples, "generate features" to become encoded_ex. Then shuffle if not eval data
:type examples: list[nlplingo.tasks.sequence.example.SequenceExample]
:type docs: list[nlplingo.text.text_theory.Document]
:type is_eval_data: bool
"""
docid_to_doc = dict()
for doc in docs:
docid_to_doc[doc.docid] = doc
encoded_examples = []
for example in examples:
encoded_ex = self.encode_argument_example(example, docid_to_doc[example.docid], label_map)
encoded_examples.append(encoded_ex)
if not is_eval_data:
encoded_examples = shuffle_list(encoded_examples)
return encoded_examples
def create_batches(self, examples, batch_size):
""" modified from uoregon code, where I assume all examples are English
        If: l = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] and batch_size = 2
        Then batches = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
        (the final batch may be shorter when len(examples) is not a multiple of batch_size)
"""
batches = [examples[i:i + batch_size] for i in range(0, len(examples), batch_size)]
return batches
def __len__(self):
return len(self.data_batches)
def __getitem__(self, key):
""" Get a batch with index. """
if not isinstance(key, int):
raise TypeError
if key < 0 or key >= len(self.data_batches):
raise IndexError
batch = self.data_batches[key]
batch_size = len(batch)
batch = list(zip(*batch))
lens = [len(x) for x in batch[0]] # batch[0] is list[xlmr_ids], so this gives: [len(xlmr_ids) for each example]
batch, _ = sort_all(batch, lens) # sort elements in batch by decreasing order of their len
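        # Sorting by length keeps similarly sized sequences together so that
        # do_padding wastes less space; e.g. lens [3, 7, 5] would reorder every
        # field of the batch to the 7-, 5-, then 3-token examples in lockstep
        # (assumed behaviour of sort_all).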
# xlmr_ids, retrieve_ids, upos_ids, xpos_ids, head_ids, dep_rel_ids, trigger_tok, entity_tags
# convert to tensors
xlmr_ids = do_padding(batch[0], batch_size)
biw2v_ids = do_padding(batch[1], batch_size)
# **************************************************
retrieve_ids = do_padding(batch[2], batch_size)
upos_ids = do_padding(batch[3], batch_size)
xpos_ids = do_padding(batch[4], batch_size)
head_ids = do_padding(batch[5], batch_size)
deprel_ids = do_padding(batch[6], batch_size)
ner_ids = do_padding(batch[7], batch_size)
lang_weights = torch.Tensor(batch[8])
triggers = torch.Tensor(batch[9])
entity_tags = do_padding(batch[10], batch_size)
eid = torch.Tensor(batch[11])
pad_masks = torch.eq(retrieve_ids, 0)
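        # Assuming do_padding pads with 0, a retrieve_ids row [1, 2, 3, 0, 0]
        # yields the pad_masks row [False, False, False, True, True], flagging
        # padded positions for downstream masking.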
return (
xlmr_ids, biw2v_ids, retrieve_ids, upos_ids, xpos_ids, head_ids, deprel_ids, ner_ids, lang_weights,
triggers, entity_tags,
eid, pad_masks
)
def __iter__(self):
for i in range(self.__len__()):
yield self.__getitem__(i)
def shuffle_batches(self):
indices = list(range(len(self.data_batches)))
random.shuffle(indices)
self.data_batches = [self.data_batches[i] for i in indices]
class PipelineGenerator(object):
"""
eval -> PipelineTrainer -> PipelineIterator
"""
def __init__(self, xlmr_model, examples, docs):
# e.g. data_path = datapoints/data/arabic-abstract-sample/arabic-abstract-sample.pipeline.test (this is already parsed by stanfordnlp)
#print('Using {} for pipeline iterator...'.format(data_path))
#self.opt = opt
self.xlmr_model = xlmr_model # This is the XLMRModel.from_pretrained
self.xlmr_model.eval()
#self.data_path = data_path
self.id2ori_example = {}
self.encoded_data = self.encode_data(examples, docs)
self.num_examples = len(self.encoded_data)
self.data_batches = self.create_batches(self.encoded_data, 8)
self.num_batches = len(self.data_batches)
def encode_pipeline_example(self, example, doc):
#example['norm2ori_offsetmap'] = {int(k): v for k, v in example['norm2ori_offsetmap'].items()} # TODO
word_list = example.words
xlmr_ids, retrieve_ids = xlmr_tokenizer.get_token_ids(self.xlmr_model, word_list)
biw2v_ids = [1] * len(word_list) # TODO
# biw2v_ids = [biw2v_map.get(word.lower(), biw2v_map[UNK_TOKEN]) for word in word_list]
xpos_ids = [xpos_map.get(token.pos_tag, xpos_map['[UNK]']) for token in example.sentence.tokens]
upos_ids = [upos_map.get(token.pos_tag_alternate, upos_map['[UNK]']) for token in example.sentence.tokens]
# upos_ids = [upos_map[map_xpos_to_upos(token.pos_tag)] for token in example.sentence.tokens]
head_ids = [token.dep_relations[0].connecting_token_index for token in example.sentence.tokens]
dep_rel_ids = [deprel_map.get(token.dep_relations[0].dep_name, deprel_map['[UNK]']) for token in example.sentence.tokens]
# head_ids = doc.adj_mats[example.sentence_index][0]
# dep_rel_ids = doc.adj_mats[example.sentence_index][1]
ner_list = ['O'] * len(word_list)
ner_ids = [ner_map.get(ner) for ner in ner_list]
eid = len(self.id2ori_example)
self.id2ori_example[eid] = example
return xlmr_ids, biw2v_ids, retrieve_ids, upos_ids, xpos_ids, head_ids, dep_rel_ids, ner_ids, eid
def encode_data(self, examples, docs):
"""
:type examples: list[nlplingo.tasks.sequence.example.SequenceExample]
:type docs: list[nlplingo.text.text_theory.Document]
"""
# if self.opt['input_lang'] != 'english' and self.opt['use_dep2sent']:
# data = read_pickle(self.data_path) # alternative data permutated by dep2sent model
# else:
# data = read_json(self.data_path)
docid_to_doc = dict()
for doc in docs:
docid_to_doc[doc.docid] = doc
encoded_examples = []
for example in examples:
encoded_ex = self.encode_pipeline_example(example, docid_to_doc[example.docid])
encoded_examples.append(encoded_ex)
return encoded_examples
def create_batches(self, examples, batch_size):
batches = [examples[i:i + batch_size] for i in range(0, len(examples), batch_size)]
return batches
def __len__(self):
return len(self.data_batches)
def __getitem__(self, key):
""" Get a batch with index. """
if not isinstance(key, int):
raise TypeError
if key < 0 or key >= len(self.data_batches):
raise IndexError
batch = self.data_batches[key]
batch_size = len(batch)
batch = list(zip(*batch))
lens = [len(x) for x in batch[0]]
batch, _ = sort_all(batch, lens)
# xlmr_ids, retrieve_ids, upos_ids, xpos_ids, head_ids, dep_rel_ids, eid
# convert to tensors
xlmr_ids = do_padding(batch[0], batch_size)
biw2v_ids = do_padding(batch[1], batch_size)
# **********************************************
retrieve_ids = do_padding(batch[2], batch_size)
upos_ids = do_padding(batch[3], batch_size)
xpos_ids = do_padding(batch[4], batch_size)
head_ids = do_padding(batch[5], batch_size)
deprel_ids = do_padding(batch[6], batch_size)
ner_ids = do_padding(batch[7], batch_size)
eid = torch.Tensor(batch[8])
pad_masks = torch.eq(retrieve_ids, 0)
return (
xlmr_ids, biw2v_ids, retrieve_ids, upos_ids, xpos_ids, head_ids, deprel_ids, ner_ids, eid, pad_masks
)
def __iter__(self):
for i in range(self.__len__()):
yield self.__getitem__(i)
def shuffle_batches(self):
indices = list(range(len(self.data_batches)))
random.shuffle(indices)
self.data_batches = [self.data_batches[i] for i in indices]
|
# Autogenerated from KST: please remove this line if doing any edits by hand!
import unittest
from params_pass_array_str import _schema
class TestParamsPassArrayStr(unittest.TestCase):
def test_params_pass_array_str(self):
r = _schema.parse_file('src/term_strz.bin')
self.assertEqual(len(r.pass_str_array.strs), 3)
self.assertEqual(r.pass_str_array.strs[0], u"fo")
self.assertEqual(r.pass_str_array.strs[1], u"o|")
self.assertEqual(r.pass_str_array.strs[2], u"ba")
self.assertEqual(len(r.pass_str_array_calc.strs), 2)
self.assertEqual(r.pass_str_array_calc.strs[0], u"aB")
self.assertEqual(r.pass_str_array_calc.strs[1], u"Cd")
|
# The MIT License
#
# Copyright (c) 2008 James Piechota
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import os
import unittest
import ns.bridge.io.CDLReader as CDLReader
import ns.evolve.Genotype as Genotype
import ns.tests.TestUtil as TestUtil
class TestMutateTimer(unittest.TestCase):
def setUp(self):
input = "R:/massive/testdata/cdl/man/CDL/timer.cdl"
self.agentSpec = CDLReader.read(input, CDLReader.kEvolveTokens)
self.geno = Genotype.Genotype(self.agentSpec)
self.geno.rand = TestUtil.NotRandom()
    def tearDown(self):
        pass
def testNoMutate(self):
''' No Timer parameters should mutate.'''
nodes = self.agentSpec.brain.nodes()
self.assertEqual(1, len(nodes))
self.geno.rand.getContext("shouldMutate").floatDefault = 0.1
self.geno.getNode("timer").mutateParameters()
node = self.agentSpec.brain.getNode("timer")
self.assertAlmostEqual(1.0, node.rate)
self.assertEqual("if_stopped", node.trigger)
self.assertEqual([0.0, 1.0], node.range)
self.assertEqual(False, node.endless)
def testAllMutate(self):
''' All Timer parameters should mutate.'''
nodes = self.agentSpec.brain.nodes()
self.assertEqual(1, len(nodes))
self.geno.rand.getContext("shouldMutate").floatDefault = 0.0
self.geno.getNode("timer").mutateParameters()
node = self.agentSpec.brain.getNode("timer")
self.assertNotAlmostEqual(1.0, node.rate)
self.assertNotEqual("if_stopped", node.trigger)
self.assertNotEqual([0.0, 1.0], node.range)
self.assertNotEqual(False, node.endless)
def testMutateRate(self):
''' Timer rate should mutate.
First: 0.5
'''
self.geno.rand.default.floatDefault = 0.5
self.geno.floatMutationRate = 0.99
self.geno.getNode("timer").mutateRate()
self.assertAlmostEqual(0.5, self.agentSpec.brain.getNode("timer").rate)
def testMutateTrigger(self):
''' Timer trigger should mutate.
First: always
'''
self.geno.rand.default.floatDefault = 0.0
self.geno.rand.default.intDefault = 1
self.geno.getNode("timer").mutateTrigger()
self.assertEqual("always", self.agentSpec.brain.getNode("timer").trigger)
def testMutateRange(self):
''' Output range should mutate.
First: [0.5, 0.75]
'''
self.geno.rand.getContext("shouldMutate").floatDefault = 0.0
self.geno.rand.getContext("mutateFloat").floatValues = [0.5, 0.25, 0.75]
self.geno.getNode("timer").mutateRange()
self.assertEqual([0.5, 0.75], self.agentSpec.brain.getNode("timer").range)
def testMutateEndless(self):
''' Defuzz endless should mutate.
First: True
'''
self.geno.boolMutationRate = 0.99
self.geno.getNode("timer").mutateEndless()
self.assertEqual(True, self.agentSpec.brain.getNode("timer").endless)
suite = unittest.TestLoader().loadTestsFromTestCase(TestMutateTimer)
|
"""Utils and fixtures to facilitate operation on metadata row in file browser in oneprovider web GUI.
"""
from selenium.common.exceptions import NoSuchElementException
from tests.gui.utils.core.common import PageObject
from tests.gui.utils.core.web_elements import InputWebElement, TextLabelWebElement, WebItem, WebItemsSequence, \
ButtonWebItem, WebElementsSequence, WebElementsSequenceItemWithText
from tests.gui.utils.core.web_objects import ButtonWebObject
from tests.gui.utils.generic import click_on_web_elem, find_web_elem_with_text, \
find_web_elem
__author__ = "Bartosz Walkowicz"
__copyright__ = "Copyright (C) 2017 ACK CYFRONET AGH"
__license__ = "This software is released under the MIT license cited in " \
"LICENSE.txt"
class _BasicMetadataEntry(PageObject):
attribute = id = TextLabelWebElement('th')
value = InputWebElement('td textarea.basic-value')
remove = ButtonWebItem('.oneicon-close')
def __str__(self):
return 'metadata basic entry'
class _BasicMetadataNewEntry(PageObject):
attribute = InputWebElement('th input[placeholder=Attribute]')
value = InputWebElement('td textarea[placeholder=Value]')
add = ButtonWebItem('.oneicon-add')
def __str__(self):
return 'metadata basic new entry in {}'.format(self.parent)
def is_valid(self):
return 'invalid' not in self.web_elem.get_attribute('class')
class _BasicMetadataPanel(PageObject):
new_entry = WebItem('tr.basic-new-entry', cls=_BasicMetadataNewEntry)
entries = WebItemsSequence('tr:not([class~=basic-new-entry])',
cls=_BasicMetadataEntry)
def __str__(self):
return 'basic metadata panel in {}'.format(self.parent)
class _MetadataEditPanel(PageObject):
text_area = InputWebElement('textarea')
status = TextLabelWebElement('.parse-status-panel')
def __str__(self):
return 'metadata edit panel in {}'.format(self.parent)
class _NavigationHeader(PageObject):
_tabs = WebElementsSequence('li')
basic = WebElementsSequenceItemWithText(seq=_tabs, text='BASIC',
cls=ButtonWebObject)
json = WebElementsSequenceItemWithText(seq=_tabs, text='JSON',
cls=ButtonWebObject)
rdf = WebElementsSequenceItemWithText(seq=_tabs, text='RDF',
cls=ButtonWebObject)
class MetadataRow(PageObject):
navigation = WebItem('ul.nav-tabs', cls=_NavigationHeader)
basic = WebItem('table.metadata-basic-table', cls=_BasicMetadataPanel)
json = WebItem('.metadata-json-editor', cls=_MetadataEditPanel)
rdf = WebItem('.metadata-xml-editor', cls=_MetadataEditPanel)
def __str__(self):
return 'metadata row in {}'.format(self.parent)
def save_all_changes(self):
return self._get_btn('save all changes')
def discard_changes(self):
return self._get_btn('discard changes')
def remove_metadata(self):
return self._get_btn('remove metadata')
def is_resource_load_error(self):
try:
self.web_elem.find_element_by_css_selector('.metadata-panel')
except NoSuchElementException:
            find_web_elem(self.web_elem, '.resource-load-error',
                          'found neither metadata panel nor resource load error')
return True
else:
return False
def _get_btn(self, name):
css_sel = '.save-metadata-row button'
err_msg = '{} btn not found in metadata row'.format(name)
btn = find_web_elem_with_text(self.web_elem, css_sel, name, err_msg)
return ButtonWebObject(self.driver, btn, self)
|
#!/usr/bin/env python
# Copyright 2017 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import itertools
import json
import os
import sys
from sdk_common import Atom, detect_category_violations, detect_collisions, gather_dependencies
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--id',
help='The atom\'s identifier',
required=True)
parser.add_argument('--out',
help='Path to the output file',
required=True)
parser.add_argument('--depfile',
help='Path to the depfile',
required=True)
parser.add_argument('--deps',
help='List of manifest paths for dependencies',
nargs='*')
parser.add_argument('--file',
help='A (destination <-- source) mapping',
action='append',
nargs=2)
parser.add_argument('--file-list',
help='A file containing destination=source mappings')
parser.add_argument('--gn-label',
help='GN label of the atom',
required=True)
parser.add_argument('--category',
help='Publication level',
required=True)
parser.add_argument('--meta',
help="Path to the atom's metadata file in the SDK",
default='',
required=False)
args = parser.parse_args()
# Gather the definitions of other atoms this atom depends on.
(deps, atoms) = gather_dependencies(args.deps)
# Build the list of files making up this atom.
extra_files = []
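    # Each line of --file-list is expected to be a destination=source mapping,
    # split on the first '='; e.g. (hypothetical): include/foo.h=../../sdk/foo.h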
if args.file_list:
with open(args.file_list, 'r') as file_list_file:
for line in file_list_file.readlines():
extra_files.append(line.strip().split('=', 1))
files = []
    # args.file is None when no --file flag is given; guard so chaining still works.
    for destination, source in itertools.chain(args.file or [], extra_files):
files.append({
'source': source,
'destination': destination,
})
atoms.update([Atom({
'id': args.id,
'meta': args.meta,
'gn-label': args.gn_label,
'category': args.category,
'deps': sorted(list(deps)),
'files': files,
})])
if detect_collisions(atoms):
print('Name collisions detected!')
return 1
if detect_category_violations(args.category, atoms):
print('Publication level violations detected!')
return 1
manifest = {
'ids': [args.id],
        # Use a list comprehension (not a lazy map object) so json.dump can
        # serialize the result under Python 3 as well as Python 2.
        'atoms': [a.json for a in sorted(atoms)],
}
with open(os.path.abspath(args.out), 'w') as out:
json.dump(manifest, out, indent=2, sort_keys=True)
with open(args.depfile, 'w') as dep_file:
dep_file.write(args.out + ': ')
for destination, source in extra_files:
dep_file.write(source + ' ')
if __name__ == '__main__':
sys.exit(main())
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Ui_SendFileDialog.ui',
# licensing of 'Ui_SendFileDialog.ui' applies.
#
# Created: Fri Jan 10 21:11:54 2020
# by: pyside2-uic running on PySide2 5.13.2
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_SendFileDialog(object):
def setupUi(self, SendFileDialog):
SendFileDialog.setObjectName("SendFileDialog")
SendFileDialog.resize(271, 129)
self.verticalLayout = QtWidgets.QVBoxLayout(SendFileDialog)
self.verticalLayout.setObjectName("verticalLayout")
self.formLayout = QtWidgets.QFormLayout()
self.formLayout.setObjectName("formLayout")
self.label = QtWidgets.QLabel(SendFileDialog)
self.label.setObjectName("label")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label)
self.filenameLabel = QtWidgets.QLabel(SendFileDialog)
self.filenameLabel.setText("")
self.filenameLabel.setObjectName("filenameLabel")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.filenameLabel)
self.label_2 = QtWidgets.QLabel(SendFileDialog)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_2)
self.pathLabel = QtWidgets.QLabel(SendFileDialog)
self.pathLabel.setText("")
self.pathLabel.setObjectName("pathLabel")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.pathLabel)
self.label_3 = QtWidgets.QLabel(SendFileDialog)
self.label_3.setObjectName("label_3")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_3)
self.sizeLabel = QtWidgets.QLabel(SendFileDialog)
self.sizeLabel.setText("")
self.sizeLabel.setObjectName("sizeLabel")
self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.sizeLabel)
self.verticalLayout.addLayout(self.formLayout)
self.progressBar = QtWidgets.QProgressBar(SendFileDialog)
self.progressBar.setProperty("value", 0)
self.progressBar.setObjectName("progressBar")
self.verticalLayout.addWidget(self.progressBar)
self.buttonBox = QtWidgets.QDialogButtonBox(SendFileDialog)
self.buttonBox.setEnabled(False)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.NoButton)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout.addWidget(self.buttonBox)
self.retranslateUi(SendFileDialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), SendFileDialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), SendFileDialog.reject)
QtCore.QMetaObject.connectSlotsByName(SendFileDialog)
def retranslateUi(self, SendFileDialog):
SendFileDialog.setWindowTitle(QtWidgets.QApplication.translate("SendFileDialog", "Send File", None, -1))
self.label.setText(QtWidgets.QApplication.translate("SendFileDialog", "Filename:", None, -1))
self.label_2.setText(QtWidgets.QApplication.translate("SendFileDialog", "Path:", None, -1))
self.label_3.setText(QtWidgets.QApplication.translate("SendFileDialog", "Size:", None, -1))
|
########################################
########################################
####### Author : Abhinandan Dubey (alivcor)
####### Stony Brook University
# perfect essays : 37, 118, 147,
import csv
import sys
import nltk
import numpy
import sklearn
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
import numpy as np
from sklearn.random_projection import sparse_random_matrix
from scipy import spatial
from nltk import word_tokenize, pos_tag
import re
esstxt = "@ORGANIZATION1, Computers are great tools and a great piece of modern technology. Almost every family has them. About @PERCENT1 of my class has computers. So many people have them because their helpful and another current learning resource. Also it's a gr"
esstxt = "I think the use of computers is very good things in the present day @LOCATION2. I think this because people use it to research, stay in touch with friends and family, and music. These are all things adults and kids need a like to do. read on and I will explain why this is so important. My first reason why I think computers are good thing is because the are a good source to do research on a specific topic. Kids all over the @LOCATION1 use the internet to do class projects. Adults can use the computer to look up a certain food resipe. If you need help with something the computer is allways there. Some people even use the computer for a job. Computers can help with a lot of things. Another reason why computers are a big help because you can stay in touch with friends and family. A great thing to use on a computer is a web cam. With this you can see and talk to people on a lillte camray even if they are far away. You can also go on myspace of face book. Theare two websites that are used to post pictures and to talk to chosen friend. It is a good way for people to get to know you. A similar thing to this is instant messaging. You can use this to talk to friends and many on line. Don't stop reading there is more thing! My last reason why computer are sutch a great thing to have is because you can do do mutch with music on the computer. You can use the computer to listen to music, and put music on to and ipod or @NUM1 player. Some people use the computer to make music. You can get a progrem that you can make heats and put it together to make some kind of a song. music is a great thing that all age groups love. That is why I think haveing a computer is a very positive thing. You can do researching, say in touch with friends and family, and do a lot with music. There is so mutch more then thease @NUM2 thing that a computer is good for. I don't think the world would be the same without computers."
esstxt = re.sub(r'(\@)([A-Za-z]*)([\W]*[\d]*[\W]*)(\s)', " ", esstxt)
word = word_tokenize(esstxt)
pos = pos_tag(word)
print(pos)
grammar = "NP: {<IN>?<IN>?<RB>?<DT>?<JJ>*<NN>}"
grammar = """
NP: {<IN>?<IN>?<RB>?<DT>?<PRP>?<JJ.*>*<NN.*>+<IN>?<JJ>?<NN>?<CC>?<NN>?}
CP: {<JJR|JJS>}
VP: {<VB.*>}
COMP: {<DT>?<NP><RB>?<VP><DT>?<CP><THAN><DT>?<NP>}
"""
ncount = 0
vcount = 0
global ideas_np
global ideas_vp
ideas_np = []
ideas_vp = []
def extract_ideas(t, inp, ivp):
try:
t.label
except AttributeError:
return
else:
if t._label == "NP":
# print "t._label : " + t._label
# print "t[0] : " + str(t[0])
temp = []
for child in t:
npw_ = str(child[0])
npt_ = str(child[1])
# print "npw_ : " + npw_
# print "child[1] : " + str(child[1])
#TODO : HERE, ADD ONLY Nouns and adjective
if npt_ == "NP" or npt_ == "JJ" or npt_ == "NNS" or npt_ == "NN":
temp.append(npw_)
else:
print "Not appending " + npw_ + "because it is a " + npt_
inp.append(temp)
if t._label == "VP":
# print "t_label : " + t._label
# print "t[0] : " + str(t[0])
temp = []
for child in t:
vpw_ = str(child[0])
# print vpw_
temp.append(vpw_)
ivp.append(temp)
for child in t:
extract_ideas(child, inp, ivp)
return [inp, ivp]
#TODO : Detect variations in tense.
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
sents = sent_detector.tokenize(esstxt.strip())
for sent in sents:
words = word_tokenize(sent)
tagged_words = pos_tag(words)
cp = nltk.RegexpParser(grammar)
result = cp.parse(tagged_words)
# print result
# print "\n"
# print type(result)
# print "~~~~~~~~~~"
inp = []
ivp = []
inp, ivp = extract_ideas(result, inp, ivp)
ideas_np.append(inp)
ideas_vp.append(ivp)
# result.draw()
print(ideas_np)
print(ideas_vp)
print("Author presents the following key ideas: \n")
key_ideas = []
for nps in ideas_np:
for nptuples in nps:
# print "-",
# for wnps in nptuples:
# # print wnps
for nptuple in nptuples:
# nptxt = "".join(str(r) for v in nptuples for r in v)
nptxt = "".join(nptuple)
if not nptxt in key_ideas and not len(nptuple) == 0:
key_ideas.append(nptxt.lower())
# print "\n"
print " ".join(key_ideas)
#(\@)([A-Za-z]*)([\W]*[\d]*[\W]*)(\s)
# pf = open('pos_tags.txt', 'w')
# for i in range(0, len(pos)):
# pf.write(str(pos[i]) + '\n')
# # if ((pos[i][1]=='NN') or ( pos[i][1]=='NNS')):
# # ncount+=1
# # nf.write(pos[i][0]+'\n')
# # print(ncount)
# # for i in range(0,len(pos)):
# # if (pos[i][1]=='VB'):
# # vcount+=1
# # vf.write(pos[i][0]+'\n')
# # nf.close()
# # vf.close()
# pf.close()
|
# -*- coding: utf-8 -*-
"""
Mouse and keyboard listener, logs events to database.
--quiet prints out nothing
@author Erki Suurjaak
@created 06.04.2015
@modified 10.02.2021
"""
from __future__ import print_function
import datetime
import math
import Queue
import sys
import threading
import time
import traceback
import pynput
import conf
import db
DEBUG = False
class Listener(threading.Thread):
"""Runs mouse and keyboard listeners, and handles incoming commands."""
def __init__(self, inqueue, outqueue=None):
threading.Thread.__init__(self)
self.inqueue = inqueue
self.running = False
self.mouse_handler = None
self.key_handler = None
self.data_handler = DataHandler(getattr(outqueue, "put", lambda x: x))
def run(self):
self.running = True
while self.running:
command = self.inqueue.get()
if not command or not self.running: continue # while self.running
try: self.handle_command(command)
except Exception:
print("Error handling command %r" % command)
traceback.print_exc()
def handle_command(self, command):
if command.startswith("start ") or command.startswith("stop "):
action, category = command.split()
if category not in conf.InputFlags: return
# Event input (mouse|keyboard), None if category itself is input
input = next((k for k, vv in conf.InputEvents.items() if category in vv), None)
attr = conf.InputFlags[category]
on = ("start" == action)
if input and not getattr(conf, conf.InputFlags[input]): # Event input itself off
on = True # Force on regardless of event flag current state
# Set other input events off, as only a single one was explicitly enabled
for c, flag in ((c, conf.InputFlags[c]) for c in conf.InputEvents[input]):
setattr(conf, flag, False)
setattr(conf, attr, on)
# Toggle input on when turning event category on
if input and on: setattr(conf, conf.InputFlags[input], True)
elif not any(getattr(conf, conf.InputFlags.get(c), False)
for c in conf.InputEvents[input or category]): # Not any event on
if not input and on: # Turning input on
# Toggle all input events on since all were off
for c in conf.InputEvents[category]:
setattr(conf, conf.InputFlags[c], True)
elif input and not on: # Turning single event off
# Toggle entire input off since all input events are now off
setattr(conf, conf.InputFlags[input], False)
if bool(conf.MouseEnabled) != bool(self.mouse_handler):
if self.mouse_handler: self.mouse_handler = self.mouse_handler.stop()
else: self.mouse_handler = MouseHandler(self.data_handler.handle)
if bool(conf.KeyboardEnabled) != bool(self.key_handler):
if self.key_handler: self.key_handler = self.key_handler.stop()
else: self.key_handler = KeyHandler(self.data_handler.handle)
elif command.startswith("clear "):
parts = command.split()[1:]
category, dates = parts[0], parts[1:]
if "all" == category: tables = sum(conf.InputEvents.values(), ())
elif category in conf.InputEvents: tables = conf.InputEvents[category]
else: tables = [category]
where = [("day", (">=", dates[0])), ("day", ("<=", dates[1]))] if dates else []
for table in tables:
db.delete("counts", where=where, type=table)
db.delete(table, where=where)
elif command.startswith("screen_size "):
# "screen_size [0, 0, 1920, 1200] [1920, 0, 1000, 800]"
sizestrs = filter(bool, map(str.strip, command[12:].replace("[", "").split("]")))
sizes = sorted(map(int, s.replace(",", "").split()) for s in sizestrs)
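            # e.g. the sample command above parses to
            # sizes == [[0, 0, 1920, 1200], [1920, 0, 1000, 800]]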
for i, size in enumerate(sizes):
db.insert("screen_sizes", x=size[0], y=size[1], w=size[2], h=size[3], display=i)
self.data_handler.screen_sizes = sizes
elif "vacuum" == command:
db.execute("VACUUM")
elif "exit" == command:
self.stop()
def stop(self):
self.running = False
self.mouse_handler and self.mouse_handler.stop()
self.key_handler and self.key_handler.stop()
self.data_handler.stop()
self.inqueue.put(None) # Wake up thread waiting on queue
db.close()
sys.exit()
class DataHandler(threading.Thread):
"""Output thread, inserts events to database and to output function."""
def __init__(self, output):
threading.Thread.__init__(self)
self.counts = {} # {type: count}
self.output = output
self.inqueue = Queue.Queue()
self.lasts = {"moves": None}
self.screen_sizes = [[0, 0] + list(conf.DefaultScreenSize)]
self.running = False
self.start()
def run(self):
self.running = True
dbqueue = [] # Data queued for later after first insert failed
db.insert("app_events", type="start")
def get_display(pt):
"""Returns (display index, [x, y, w, h]) for mouse event position."""
for i, size in enumerate(self.screen_sizes):
# Point falls exactly into display
if size[0] <= pt[0] <= size[0] + size[2] \
and size[1] <= pt[1] <= size[1] + size[3]: return i, size
if pt[0] >= self.screen_sizes[-1][0] + self.screen_sizes[-1][2] \
or pt[1] >= self.screen_sizes[-1][1] + self.screen_sizes[-1][3]:
# Point is beyond the last display
return len(self.screen_sizes) - 1, self.screen_sizes[-1]
for i, size in enumerate(self.screen_sizes):
# One coordinate falls into display, other is off screen
if size[0] <= pt[0] <= size[0] + size[2] \
or size[1] <= pt[1] <= size[1] + size[3]: return i, size
return 0, self.screen_sizes[0] # Fall back to first display
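        # e.g. with screen_sizes [[0, 0, 1920, 1200], [1920, 0, 1000, 800]]
        # (sample values), point (2500, 100) falls on display 1, while
        # (5000, 100) lies beyond the last display and clamps to it.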
def rescale(pt):
"""Remaps point to heatmap size for less granularity."""
HS = conf.MouseHeatmapSize
_, screen_size = get_display(pt)
SC = [screen_size[i + 2] / float(HS[i]) for i in (0, 1)]
return [min(int(pt[i] / SC[i]), HS[i]) for i in (0, 1)]
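        # e.g. assuming conf.MouseHeatmapSize == (300, 300), point (960, 600)
        # on a 1920x1200 display rescales to [150, 150].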
def one_line(pt1, pt2, pt3):
"""Returns whether points more or less fall onto one line."""
(x1, y1), (x2, y2), (x3, y3) = map(rescale, (pt1, pt2, pt3))
if not (x1 >= x2 >= x3) and not (y1 >= y2 >= y3) \
and not (x1 <= x2 <= x3) and not (y1 <= y2 <= y3): return False
return abs((y1 - y2) * (x1 - x3) - (y1 - y3) * (x1 - x2)) <= conf.MouseMoveJoinRadius
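        # The abs(...) expression above is twice the area of the triangle
        # spanned by the three rescaled points; a near-zero area means they
        # are close to collinear, so intermediate move events can be merged.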
def sign(v): return -1 if v < 0 else 1 if v > 0 else 0
while self.running:
data, items = self.inqueue.get(), []
while data:
items.append(data)
try: data = self.inqueue.get(block=False)
except Queue.Empty: data = None
if not items or not self.running: continue # while self.running
move0, move1, scroll0 = None, None, None
for data in items:
category = data.pop("type")
if category in conf.InputEvents["mouse"]:
data["display"], _ = get_display([data["x"], data["y"]])
if category in self.lasts: # Skip event if same position as last
pos = rescale([data["x"], data["y"]])
if self.lasts[category] == pos: continue # for data
self.lasts[category] = pos
if "moves" == category: # Reduce move events
if move0 and move1 and move1["stamp"] - move0["stamp"] < conf.MouseMoveJoinInterval \
and data["stamp"] - move1["stamp"] < conf.MouseMoveJoinInterval \
and move0["display"] == move1["display"] == data["display"]:
if one_line(*[(v["x"], v["y"]) for v in (move0, move1, data)]):
move1.update(data)
continue # for data
move0, move1 = move1, data
elif "scrolls" == category: # Reduce scroll events
if scroll0 and scroll0["display"] == data["display"] \
and sign(scroll0["dx"]) == sign(data["dx"]) \
and sign(scroll0["dy"]) == sign(data["dy"]) \
and data["stamp"] - scroll0["stamp"] < conf.MouseScrollJoinInterval:
for k in ("dx", "dy"): scroll0[k] += data[k]
for k in ("stamp", "x", "y"): scroll0[k] = data[k]
continue # for data
scroll0 = data
if category not in self.counts: self.counts[category] = 0
self.counts[category] += 1
dbqueue.append((category, data))
try:
while dbqueue:
db.insert(*dbqueue[0])
dbqueue.pop(0)
except StandardError as e:
print(e, category, data)
self.output(self.counts)
if conf.EventsWriteInterval > 0: time.sleep(conf.EventsWriteInterval)
def stop(self):
self.running = False
self.inqueue.put(None) # Wake up thread waiting on queue
db.close()
def handle(self, **kwargs):
category = kwargs.get("type")
if not getattr(conf, conf.InputFlags.get(category), False): return
kwargs.update(day=datetime.date.today(), stamp=time.time())
self.inqueue.put(kwargs)
class MouseHandler(object):
"""Listens to mouse events and forwards to output."""
def __init__(self, output):
self._output = output
self._buttons = {"left": 1, "right": 2, "middle": 3, "unknown": 0}
for b in pynput.mouse.Button:
if b.name not in self._buttons:
self._buttons[b.name] = len(self._buttons)
self._listener = pynput.mouse.Listener(
on_move=self.move, on_click=self.click, on_scroll=self.scroll
)
self._listener.start()
def click(self, x, y, button, pressed, *a, **kw):
if pressed:
buttonindex = self._buttons.get(button.name, 0)
self._output(type="clicks", x=x, y=y, button=buttonindex)
def move(self, x, y, *a, **kw):
self._output(type="moves", x=x, y=y)
def scroll(self, x, y, dx, dy, *a, **kw):
self._output(type="scrolls", x=x, y=y, dx=dx, dy=dy)
def stop(self): self._listener.stop()
class KeyHandler(object):
"""Listens to keyboard events and forwards to output."""
CONTROLCODES = {"\x00": "Nul", "\x01": "Start-Of-Header", "\x02": "Start-Of-Text", "\x03": "Break", "\x04": "End-Of-Transmission", "\x05": "Enquiry", "\x06": "Ack", "\x07": "Bell", "\x08": "Backspace", "\x09": "Tab", "\x0a": "Linefeed", "\x0b": "Vertical-Tab", "\x0c": "Form-Fe", "\x0d": "Enter", "\x0e": "Shift-In", "\x0f": "Shift-Out", "\x10": "Data-Link-Escape", "\x11": "Devicecontrol1", "\x12": "Devicecontrol2", "\x13": "Devicecontrol3", "\x14": "Devicecontrol4", "\x15": "Nak", "\x16": "Syn", "\x17": "End-Of-Transmission-Block", "\x18": "Break", "\x19": "End-Of-Medium", "\x1a": "Substitute", "\x1b": "Escape", "\x1c": "File-Separator", "\x1d": "Group-Separator", "\x1e": "Record-Separator", "\x1f": "Unit-Separator", "\x20": "Space", "\x7f": "Del", "\xa0": "Non-Breaking Space"}
NUMPAD_SPECIALS = [("Insert", False), ("Delete", False), ("Home", False), ("End", False), ("PageUp", False), ("PageDown", False), ("Up", False), ("Down", False), ("Left", False), ("Right", False), ("Clear", False), ("Enter", True)]
NUMPAD_CHARS = {"0": "Numpad0", "1": "Numpad1", "2": "Numpad2", "3": "Numpad3", "4": "Numpad4", "5": "Numpad5", "6": "Numpad6", "7": "Numpad7", "8": "Numpad8", "9": "Numpad9", "/": "Numpad-Divide", "*": "Numpad-Multiply", "-": "Numpad-Subtract", "+": "Numpad-Add", }
MODIFIERNAMES = {"Lcontrol": "Ctrl", "Rcontrol": "Ctrl", "Lshift": "Shift", "Rshift": "Shift", "Alt": "Alt", "Lwin": "Win", "Rwin": "Win", "AltGr": "AltGr"}
RENAMES = {"Prior": "PageUp", "Next": "PageDown", "Lmenu": "Alt", "Rmenu": "AltGr", "Apps": "Menu", "Return": "Enter", "Back": "Backspace", "Capital": "CapsLock", "Numlock": "NumLock", "Snapshot": "PrintScreen", "Scroll": "ScrollLock", "Decimal": "Numpad-Decimal", "Divide": "Numpad-Divide", "Subtract": "Numpad-Subtract", "Multiply": "Numpad-Multiply", "Add": "Numpad-Add", "Cancel": "Break", "Control_L": "Lcontrol", "Control_R": "Rcontrol", "Alt_L": "Alt", "Shift_L": "Lshift", "Shift_R": "Rshift", "Super_L": "Lwin", "Super_R": "Rwin", "BackSpace": "Backspace", "L1": "F11", "L2": "F12", "Page_Up": "PageUp", "Print": "PrintScreen", "Scroll_Lock": "ScrollLock", "Caps_Lock": "CapsLock", "Num_Lock": "NumLock", "Begin": "Clear", "Super": "Win", "Mode_switch": "AltGr"}
STICKY_KEYS = ["Lcontrol", "Rcontrol", "Lshift", "Rshift", "Alt", "AltGr", "Lwin", "Rwin", "ScrollLock", "CapsLock", "NumLock"]
PYNPUT_NAMES = {"alt_l": "Alt", "alt_r": "AltGr", "cmd": "Lwin", "cmd_l": "Lwin", "cmd_r": "Rwin", "ctrl": "Lcontrol", "ctrl_l": "Lcontrol", "ctrl_r": "Rcontrol", "esc": "Escape", "shift": "Lshift", "shift_l": "Lshift", "shift_r": "Rshift", "pause": "Break"}
VK_NAMES = { # Virtual keycode values on Windows
226: "Oem_102", # Right from Lshift
188: "Oem_Comma", # Right from M
190: "Oem_Period", # Right from Oem_Comma
221: "Oem_6", # Left from Rshift
186: "Oem_1", # Right from L
191: "Oem_2", # Right from Oem_1
220: "Oem_5", # Right from Oem_2
192: "Oem_3", # Right from P
219: "Oem_4", # Right from Oem_3
222: "Oem_7", # Left from 1
189: "Oem_Minus", # Right from 0
187: "Oem_Plus", # Left from Backspace
96: "Numpad0",
97: "Numpad1",
98: "Numpad2",
99: "Numpad3",
100: "Numpad4",
101: "Numpad5",
102: "Numpad6",
103: "Numpad7",
104: "Numpad8",
105: "Numpad9",
12: "Numpad-Clear", # Numpad5 without NumLock
111: "Numpad-Divide",
106: "Numpad-Multiply",
109: "Numpad-Subtract",
107: "Numpad-Add",
172: "Web/Home", # Extra top keys
180: "Email",
181: "Media",
183: "Calculator",
}
OTHER_VK_NAMES = { # Not Windows
65027: "AltGr",
65437: "Numpad-Clear", # Numpad5 without NumLock
269025041: "MediaVolumeDown",
269025042: "MediaVolumeMute",
269025043: "MediaVolumeUp",
269025044: "MediaPlayPause",
269025048: "Web/Home",
269025049: "Email",
269025053: "Calculator",
269025074: "Media",
}
def __init__(self, output):
self.KEYNAMES = {k: v for k, v in self.PYNPUT_NAMES.items()} # pynput.Key.xyz.name: label
for key in pynput.keyboard.Key:
if key.name not in self.KEYNAMES:
self.KEYNAMES[key.name] = self.nicename(key.name)
self._output = output
self._downs = {} # {key name: bool}
self._modifiers = dict((x, False) for x in self.MODIFIERNAMES.values())
self._realmodifiers = dict((x, False) for x in self.MODIFIERNAMES)
# Extended keys: AltGr, Rcontrol; clustered Insert/Delete/navigation and arrows;
# NumLock; Break; PrintScreen; Numpad-Divide, Numpad-Enter
self._is_extended = None # Current key is extended key
args = dict(on_press= lambda k, *a, **kw: self.on_event(True, k),
on_release=lambda k, *a, **kw: self.on_event(False, k))
if "win32" == sys.platform:
args.update(win32_event_filter=self.win32_filter)
# Cannot inherit from pynput.keyboard.Listener directly,
# as it does hacky magic dependant on self.__class__.__module__
self._listener = pynput.keyboard.Listener(**args)
self._listener.start()
def on_event(self, pressed, key):
"""Handler for key event."""
mykey, realkey = self.extract(key)
if not mykey and not realkey: return
if realkey in self.MODIFIERNAMES:
self._modifiers[self.MODIFIERNAMES[realkey]] = pressed
self._realmodifiers[realkey] = pressed
if realkey in self.STICKY_KEYS and self._downs.get(realkey) == pressed:
return # Avoid multiple events on holding down Shift etc
self._downs[realkey] = pressed
if not conf.KeyboardEnabled or not pressed: return
if DEBUG: print("Adding key %s (real %s)" % (mykey.encode("utf-8"), realkey.encode("utf-8")))
self._output(type="keys", key=mykey, realkey=realkey)
if mykey not in self.MODIFIERNAMES and conf.KeyboardCombosEnabled:
modifier = "-".join(k for k in ["Ctrl", "Alt", "AltGr", "Shift", "Win"]
if self._modifiers[k])
if modifier and modifier != "Shift": # Shift-X is not a combo
mykey = "%s-%s" % (modifier, realkey)
realmodifier = "-".join(k for k, v in self._realmodifiers.items() if v)
realkey = "%s-%s" % (realmodifier, realkey)
if DEBUG: print("Adding combo %s (real %s)" % (mykey.encode("utf-8"), realkey.encode("utf-8")))
self._output(type="combos", key=mykey, realkey=realkey)
def extract(self, key):
"""Returns (key name or uppercase char, realkey name) for pynput event."""
if isinstance(key, pynput.keyboard.Key):
name, char, vk = key.name, key.value.char, key.value.vk
else: # pynput.keyboard.KeyCode
name, char, vk = None, key.char, key.vk
if name:
name = self.KEYNAMES.get(name) or self.nicename(name)
name = realname = self.RENAMES.get(name, name)
if vk and (name, self._is_extended) in self.NUMPAD_SPECIALS:
name = realname = "Numpad-" + name
elif ord("A") <= vk <= ord("Z"): # Common A..Z keys, whatever the chars
name, realname = char.upper() if char else chr(vk), chr(vk)
if not name and "win32" != sys.platform:
if char and vk: name = char.upper()
elif char in self.NUMPAD_CHARS: name = self.NUMPAD_CHARS[char]
elif vk in self.OTHER_VK_NAMES: name = self.OTHER_VK_NAMES[vk]
else: name = char.upper() if char else zhex(vk) if vk else None
realname = name
elif vk in self.VK_NAMES: # Numpad and extra keys
realname = self.VK_NAMES[vk]
name = char.upper() if char and "Oem_" in realname else realname
elif name: pass
elif char and vk:
name = char.upper()
realname = chr(vk) if ord("0") <= vk <= ord("9") else zhex(vk)
else: realname = None
if name in self.CONTROLCODES:
realname = self.CONTROLCODES.get(realname, realname)
# Combos can also produce control codes, e.g. Ctrl-Y: End-of-Medium
if self._modifiers["Ctrl"]: name = realname
else: name = self.CONTROLCODES[name]
return name, realname
def win32_filter(self, msg, data):
"""Stores extended-key bit for upcoming press/release event."""
# Pressing AltGr generates a dummy Lcontrol event: skip on_event()
if 541 == data.scanCode and 162 == data.vkCode: return False
# KBDLLHOOKSTRUCT.flags bit 0 is LLKHF_EXTENDED
self._is_extended = data.flags & 0x01
def nicename(self, s):
"""Transforms snake case like "alt_gr" to Pascal case like "AltGr"."""
return "".join(x.capitalize() for x in s.split("_"))
def stop(self): self._listener.stop()
class LineQueue(threading.Thread):
"""Reads lines from a file-like object and pushes to self.queue."""
def __init__(self, input):
threading.Thread.__init__(self)
self.daemon = True
self.input, self.queue = input, Queue.Queue()
self.start()
def run(self):
for line in iter(self.input.readline, ""):
self.queue.put(line.strip())
def zhex(v):
"""Returns number as zero-padded hex, e.g. "0x0C" for 12 and "0x0100" for 256."""
if not v: return "0x00"
sign, v = ("-" if v < 0 else ""), abs(v)
return "%s0x%0*X" % (sign, 2 * int(1 + math.log(v) / math.log(2) // 8), v)
def start(inqueue, outqueue=None):
"""Starts the listener with incoming and outgoing queues."""
conf.init(), db.init(conf.DbPath, conf.DbStatements)
# Carry out db update for tables lacking expected new columns
for (table, col), sqls in conf.DbUpdateStatements:
if any(col == x["name"] for x in db.execute("PRAGMA table_info(%s)" % table)):
continue # for
for sql in sqls: db.execute(sql)
try: db.execute("PRAGMA journal_mode = WAL")
except Exception: pass
try: Listener(inqueue, outqueue).run()
except KeyboardInterrupt: pass
def main():
"""Entry point for stand-alone execution."""
conf.init()
inqueue = LineQueue(sys.stdin).queue
outqueue = type("", (), {"put": lambda self, x: print("\r%s" % x, end=" ")})()
if "--quiet" in sys.argv: outqueue = None
if conf.MouseEnabled: inqueue.put("start mouse")
if conf.KeyboardEnabled: inqueue.put("start keyboard")
start(inqueue, outqueue)
if "__main__" == __name__:
main()
|
"""Commands module."""
|
from __future__ import absolute_import, division, print_function
import unittest
import os.path
from GenomicConsensus.options import Constants
from pbcore.io import ContigSet
import pbcommand.testkit
import pbtestdata
DATA_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), "data")
assert os.path.isdir(DATA_DIR)
class TestVariantCaller(pbcommand.testkit.PbTestApp):
DRIVER_BASE = "variantCaller "
DRIVER_EMIT = DRIVER_BASE + " --emit-tool-contract "
DRIVER_RESOLVE = DRIVER_BASE + " --resolved-tool-contract "
REQUIRES_PBCORE = True
INPUT_FILES = [
pbtestdata.get_file("aligned-xml"), pbtestdata.get_file("lambdaNEB")
]
TASK_OPTIONS = {
"genomic_consensus.task_options.min_coverage": 0,
"genomic_consensus.task_options.min_confidence": 0,
"genomic_consensus.task_options.algorithm": "quiver",
"genomic_consensus.task_options.diploid": False,
}
def run_after(self, rtc, output_dir):
contigs_file = rtc.task.output_files[2]
with ContigSet(contigs_file, strict=True) as ds:
pass
class TestVariantCallerArrow(TestVariantCaller):
TASK_OPTIONS = {
"genomic_consensus.task_options.algorithm": "arrow",
}
def run_after(self, rtc, output_dir):
super(TestVariantCallerArrow, self).run_after(rtc, output_dir)
self.assertTrue(bool(rtc.task.options[Constants.MASKING_ID]))
class TestGffToBed(pbcommand.testkit.PbTestApp):
DRIVER_BASE = "gffToBed "
DRIVER_EMIT = DRIVER_BASE + " --emit-tool-contract "
DRIVER_RESOLVE = DRIVER_BASE + " --resolved-tool-contract "
REQUIRES_PBCORE = True
INPUT_FILES = [
os.path.join(DATA_DIR, "converters", "variants.gff.gz"),
]
TASK_OPTIONS = {
"genomic_consensus.task_options.track_name": "None",
"genomic_consensus.task_options.track_description": "None",
"genomic_consensus.task_options.use_score": 0,
}
class TestGffToVcf(pbcommand.testkit.PbTestApp):
DRIVER_BASE = "gffToVcf"
DRIVER_EMIT = DRIVER_BASE + " --emit-tool-contract "
DRIVER_RESOLVE = DRIVER_BASE + " --resolved-tool-contract "
REQUIRES_PBCORE = True
INPUT_FILES = [
os.path.join(DATA_DIR, "converters", "variants.gff.gz"),
]
TASK_OPTIONS = {
"genomic_consensus.task_options.global_reference": "Staphylococcus_aureus_USA300_TCH1516",
}
class TestSummarizeConsensus(pbcommand.testkit.PbTestApp):
DRIVER_BASE = "summarizeConsensus"
DRIVER_EMIT = DRIVER_BASE + " --emit-tool-contract "
DRIVER_RESOLVE = DRIVER_BASE + " --resolved-tool-contract "
REQUIRES_PBCORE = True
INPUT_FILES = [
pbtestdata.get_file("alignment-summary-gff"),
pbtestdata.get_file("variants-gff")
]
TASK_OPTIONS = {}
if __name__ == "__main__":
unittest.main()
|
# -*- coding: utf-8 -*-
"""
An alphabetical list of Finnish municipalities for use as `choices` in a
formfield.
This exists in this standalone file so that it's only imported into memory
when explicitly needed.
"""
MUNICIPALITY_CHOICES = (
('akaa', u"Akaa"),
('alajarvi', u"Alajärvi"),
('alavieska', u"Alavieska"),
('alavus', u"Alavus"),
('artjarvi', u"Artjärvi"),
('asikkala', u"Asikkala"),
('askola', u"Askola"),
('aura', u"Aura"),
('brando', u"Brändö"),
('eckero', u"Eckerö"),
('enonkoski', u"Enonkoski"),
('enontekio', u"Enontekiö"),
('espoo', u"Espoo"),
('eura', u"Eura"),
('eurajoki', u"Eurajoki"),
('evijarvi', u"Evijärvi"),
('finstrom', u"Finström"),
('forssa', u"Forssa"),
('foglo', u"Föglö"),
('geta', u"Geta"),
('haapajarvi', u"Haapajärvi"),
('haapavesi', u"Haapavesi"),
('hailuoto', u"Hailuoto"),
('halsua', u"Halsua"),
('hamina', u"Hamina"),
('hammarland', u"Hammarland"),
('hankasalmi', u"Hankasalmi"),
('hanko', u"Hanko"),
('harjavalta', u"Harjavalta"),
('hartola', u"Hartola"),
('hattula', u"Hattula"),
('haukipudas', u"Haukipudas"),
('hausjarvi', u"Hausjärvi"),
('heinola', u"Heinola"),
('heinavesi', u"Heinävesi"),
('helsinki', u"Helsinki"),
('hirvensalmi', u"Hirvensalmi"),
('hollola', u"Hollola"),
('honkajoki', u"Honkajoki"),
('huittinen', u"Huittinen"),
('humppila', u"Humppila"),
('hyrynsalmi', u"Hyrynsalmi"),
('hyvinkaa', u"Hyvinkää"),
('hameenkoski', u"Hämeenkoski"),
('hameenkyro', u"Hämeenkyrö"),
('hameenlinna', u"Hämeenlinna"),
('ii', u"Ii"),
('iisalmi', u"Iisalmi"),
('iitti', u"Iitti"),
('ikaalinen', u"Ikaalinen"),
('ilmajoki', u"Ilmajoki"),
('ilomantsi', u"Ilomantsi"),
('imatra', u"Imatra"),
('inari', u"Inari"),
('inkoo', u"Inkoo"),
('isojoki', u"Isojoki"),
('isokyro', u"Isokyrö"),
('jalasjarvi', u"Jalasjärvi"),
('janakkala', u"Janakkala"),
('joensuu', u"Joensuu"),
('jokioinen', u"Jokioinen"),
('jomala', u"Jomala"),
('joroinen', u"Joroinen"),
('joutsa', u"Joutsa"),
('juankoski', u"Juankoski"),
('juuka', u"Juuka"),
('juupajoki', u"Juupajoki"),
('juva', u"Juva"),
('jyvaskyla', u"Jyväskylä"),
('jamijarvi', u"Jämijärvi"),
('jamsa', u"Jämsä"),
('jarvenpaa', u"Järvenpää"),
('kaarina', u"Kaarina"),
('kaavi', u"Kaavi"),
('kajaani', u"Kajaani"),
('kalajoki', u"Kalajoki"),
('kangasala', u"Kangasala"),
('kangasniemi', u"Kangasniemi"),
('kankaanpaa', u"Kankaanpää"),
('kannonkoski', u"Kannonkoski"),
('kannus', u"Kannus"),
('karijoki', u"Karijoki"),
('karjalohja', u"Karjalohja"),
('karkkila', u"Karkkila"),
('karstula', u"Karstula"),
('karttula', u"Karttula"),
('karvia', u"Karvia"),
('kaskinen', u"Kaskinen"),
('kauhajoki', u"Kauhajoki"),
('kauhava', u"Kauhava"),
('kauniainen', u"Kauniainen"),
('kaustinen', u"Kaustinen"),
('keitele', u"Keitele"),
('kemi', u"Kemi"),
('kemijarvi', u"Kemijärvi"),
('keminmaa', u"Keminmaa"),
('kemionsaari', u"Kemiönsaari"),
('kempele', u"Kempele"),
('kerava', u"Kerava"),
('kerimaki', u"Kerimäki"),
('kesalahti', u"Kesälahti"),
('keuruu', u"Keuruu"),
('kihnio', u"Kihniö"),
('kiikoinen', u"Kiikoinen"),
('kiiminki', u"Kiiminki"),
('kinnula', u"Kinnula"),
('kirkkonummi', u"Kirkkonummi"),
('kitee', u"Kitee"),
('kittila', u"Kittilä"),
('kiuruvesi', u"Kiuruvesi"),
('kivijarvi', u"Kivijärvi"),
('kokemaki', u"Kokemäki"),
('kokkola', u"Kokkola"),
('kolari', u"Kolari"),
('konnevesi', u"Konnevesi"),
('kontiolahti', u"Kontiolahti"),
('korsnas', u"Korsnäs"),
('koskitl', u"Koski Tl"),
('kotka', u"Kotka"),
('kouvola', u"Kouvola"),
('kristiinankaupunki', u"Kristiinankaupunki"),
('kruunupyy', u"Kruunupyy"),
('kuhmalahti', u"Kuhmalahti"),
('kuhmo', u"Kuhmo"),
('kuhmoinen', u"Kuhmoinen"),
('kumlinge', u"Kumlinge"),
('kuopio', u"Kuopio"),
('kuortane', u"Kuortane"),
('kurikka', u"Kurikka"),
('kustavi', u"Kustavi"),
('kuusamo', u"Kuusamo"),
('kylmakoski', u"Kylmäkoski"),
('kyyjarvi', u"Kyyjärvi"),
('karkola', u"Kärkölä"),
('karsamaki', u"Kärsämäki"),
('kokar', u"Kökar"),
('koylio', u"Köyliö"),
('lahti', u"Lahti"),
('laihia', u"Laihia"),
('laitila', u"Laitila"),
('lapinjarvi', u"Lapinjärvi"),
('lapinlahti', u"Lapinlahti"),
('lappajarvi', u"Lappajärvi"),
('lappeenranta', u"Lappeenranta"),
('lapua', u"Lapua"),
('laukaa', u"Laukaa"),
('lavia', u"Lavia"),
('lemi', u"Lemi"),
('lemland', u"Lemland"),
('lempaala', u"Lempäälä"),
('leppavirta', u"Leppävirta"),
('lestijarvi', u"Lestijärvi"),
('lieksa', u"Lieksa"),
('lieto', u"Lieto"),
('liminka', u"Liminka"),
('liperi', u"Liperi"),
('lohja', u"Lohja"),
('loimaa', u"Loimaa"),
('loppi', u"Loppi"),
('loviisa', u"Loviisa"),
('luhanka', u"Luhanka"),
('lumijoki', u"Lumijoki"),
('lumparland', u"Lumparland"),
('luoto', u"Luoto"),
('luumaki', u"Luumäki"),
('luvia', u"Luvia"),
('lansi-turunmaa', u"Länsi-Turunmaa"),
('maalahti', u"Maalahti"),
('maaninka', u"Maaninka"),
('maarianhamina', u"Maarianhamina"),
('marttila', u"Marttila"),
('masku', u"Masku"),
('merijarvi', u"Merijärvi"),
('merikarvia', u"Merikarvia"),
('miehikkala', u"Miehikkälä"),
('mikkeli', u"Mikkeli"),
('muhos', u"Muhos"),
('multia', u"Multia"),
('muonio', u"Muonio"),
('mustasaari', u"Mustasaari"),
('muurame', u"Muurame"),
('mynamaki', u"Mynämäki"),
('myrskyla', u"Myrskylä"),
('mantsala', u"Mäntsälä"),
('mantta-vilppula', u"Mänttä-Vilppula"),
('mantyharju', u"Mäntyharju"),
('naantali', u"Naantali"),
('nakkila', u"Nakkila"),
('nastola', u"Nastola"),
('nilsia', u"Nilsiä"),
('nivala', u"Nivala"),
('nokia', u"Nokia"),
('nousiainen', u"Nousiainen"),
('nummi-pusula', u"Nummi-Pusula"),
('nurmes', u"Nurmes"),
('nurmijarvi', u"Nurmijärvi"),
('narpio', u"Närpiö"),
('oravainen', u"Oravainen"),
('orimattila', u"Orimattila"),
('oripaa', u"Oripää"),
('orivesi', u"Orivesi"),
('oulainen', u"Oulainen"),
('oulu', u"Oulu"),
('oulunsalo', u"Oulunsalo"),
('outokumpu', u"Outokumpu"),
('padasjoki', u"Padasjoki"),
('paimio', u"Paimio"),
('paltamo', u"Paltamo"),
('parikkala', u"Parikkala"),
('parkano', u"Parkano"),
('pedersore', u"Pedersöre"),
('pelkosenniemi', u"Pelkosenniemi"),
('pello', u"Pello"),
('perho', u"Perho"),
('pertunmaa', u"Pertunmaa"),
('petajavesi', u"Petäjävesi"),
('pieksamaki', u"Pieksämäki"),
('pielavesi', u"Pielavesi"),
('pietarsaari', u"Pietarsaari"),
('pihtipudas', u"Pihtipudas"),
('pirkkala', u"Pirkkala"),
('polvijarvi', u"Polvijärvi"),
('pomarkku', u"Pomarkku"),
('pori', u"Pori"),
('pornainen', u"Pornainen"),
('porvoo', u"Porvoo"),
('posio', u"Posio"),
('pudasjarvi', u"Pudasjärvi"),
('pukkila', u"Pukkila"),
('punkaharju', u"Punkaharju"),
('punkalaidun', u"Punkalaidun"),
('puolanka', u"Puolanka"),
('puumala', u"Puumala"),
('pyhtaa', u"Pyhtää"),
('pyhajoki', u"Pyhäjoki"),
('pyhajarvi', u"Pyhäjärvi"),
('pyhanta', u"Pyhäntä"),
('pyharanta', u"Pyhäranta"),
('palkane', u"Pälkäne"),
('poytya', u"Pöytyä"),
('raahe', u"Raahe"),
('raasepori', u"Raasepori"),
('raisio', u"Raisio"),
('rantasalmi', u"Rantasalmi"),
('ranua', u"Ranua"),
('rauma', u"Rauma"),
('rautalampi', u"Rautalampi"),
('rautavaara', u"Rautavaara"),
('rautjarvi', u"Rautjärvi"),
('reisjarvi', u"Reisjärvi"),
('riihimaki', u"Riihimäki"),
('ristiina', u"Ristiina"),
('ristijarvi', u"Ristijärvi"),
('rovaniemi', u"Rovaniemi"),
('ruokolahti', u"Ruokolahti"),
('ruovesi', u"Ruovesi"),
('rusko', u"Rusko"),
('raakkyla', u"Rääkkylä"),
('saarijarvi', u"Saarijärvi"),
('salla', u"Salla"),
('salo', u"Salo"),
('saltvik', u"Saltvik"),
('sastamala', u"Sastamala"),
('sauvo', u"Sauvo"),
('savitaipale', u"Savitaipale"),
('savonlinna', u"Savonlinna"),
('savukoski', u"Savukoski"),
('seinajoki', u"Seinäjoki"),
('sievi', u"Sievi"),
('siikainen', u"Siikainen"),
('siikajoki', u"Siikajoki"),
('siikalatva', u"Siikalatva"),
('siilinjarvi', u"Siilinjärvi"),
('simo', u"Simo"),
('sipoo', u"Sipoo"),
('siuntio', u"Siuntio"),
('sodankyla', u"Sodankylä"),
('soini', u"Soini"),
('somero', u"Somero"),
('sonkajarvi', u"Sonkajärvi"),
('sotkamo', u"Sotkamo"),
('sottunga', u"Sottunga"),
('sulkava', u"Sulkava"),
('sund', u"Sund"),
('suomenniemi', u"Suomenniemi"),
('suomussalmi', u"Suomussalmi"),
('suonenjoki', u"Suonenjoki"),
('sysma', u"Sysmä"),
('sakyla', u"Säkylä"),
('taipalsaari', u"Taipalsaari"),
('taivalkoski', u"Taivalkoski"),
('taivassalo', u"Taivassalo"),
('tammela', u"Tammela"),
('tampere', u"Tampere"),
('tarvasjoki', u"Tarvasjoki"),
('tervo', u"Tervo"),
('tervola', u"Tervola"),
('teuva', u"Teuva"),
('tohmajarvi', u"Tohmajärvi"),
('toholampi', u"Toholampi"),
('toivakka', u"Toivakka"),
('tornio', u"Tornio"),
('turku', u"Turku"),
('tuusniemi', u"Tuusniemi"),
('tuusula', u"Tuusula"),
('tyrnava', u"Tyrnävä"),
('toysa', u"Töysä"),
('ulvila', u"Ulvila"),
('urjala', u"Urjala"),
('utajarvi', u"Utajärvi"),
('utsjoki', u"Utsjoki"),
('uurainen', u"Uurainen"),
('uusikaarlepyy', u"Uusikaarlepyy"),
('uusikaupunki', u"Uusikaupunki"),
('vaala', u"Vaala"),
('vaasa', u"Vaasa"),
('valkeakoski', u"Valkeakoski"),
('valtimo', u"Valtimo"),
('vantaa', u"Vantaa"),
('varkaus', u"Varkaus"),
('varpaisjarvi', u"Varpaisjärvi"),
('vehmaa', u"Vehmaa"),
('vesanto', u"Vesanto"),
('vesilahti', u"Vesilahti"),
('veteli', u"Veteli"),
('vierema', u"Vieremä"),
('vihanti', u"Vihanti"),
('vihti', u"Vihti"),
('viitasaari', u"Viitasaari"),
('vimpeli', u"Vimpeli"),
('virolahti', u"Virolahti"),
('virrat', u"Virrat"),
('vardo', u"Vårdö"),
('vahakyro', u"Vähäkyrö"),
('voyri-maksamaa', u"Vöyri-Maksamaa"),
('yli-ii', u"Yli-Ii"),
('ylitornio', u"Ylitornio"),
('ylivieska', u"Ylivieska"),
('ylojarvi', u"Ylöjärvi"),
('ypaja', u"Ypäjä"),
('ahtari', u"Ähtäri"),
('aanekoski', u"Äänekoski")
)
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('labour', '0014_auto_20151108_1906'),
]
operations = [
migrations.CreateModel(
name='Shift',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('start_time', models.DateTimeField()),
('hours', models.PositiveIntegerField()),
('notes', models.TextField()),
('job', models.ForeignKey(on_delete=models.CASCADE, to='labour.Job')),
('signup', models.ForeignKey(on_delete=models.CASCADE, to='labour.Signup')),
],
options={
'verbose_name': 'ty\xf6vuoro',
'verbose_name_plural': 'ty\xf6vuorot',
},
),
]
|
import pandas as pd
from flask import Flask, render_template, request
from recommendation.recommender import item_based_recom, rename_columns, item_and_genre_based_recom, categories
app = Flask(__name__)
app.recommender = pd.read_parquet('models/recommender_df.parquet.gzip')
app.movies = pd.read_parquet('models/movies.parquet.gzip')
rename_columns(app.recommender)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/title')
def recommend_movie_form():
return render_template('movie_form.html', app=app)
@app.route('/title', methods=['POST'])
def recommend_movie():
movie_title = request.form['movie_title']
try:
df = item_based_recom(app.recommender, movie_title).head(10)
except Exception:
return render_template('error_title.html')
return render_template('movie_title.html', name='Movie recommendation for {}'.format(movie_title), data=df)
@app.route('/genre')
def recommend_movie_genre_form():
return render_template('movie_form.html', app=app)
@app.route('/genre', methods=['POST'])
def recommend_movie_genre():
movie_title = request.form['movie_title']
try:
df = item_and_genre_based_recom(item_based_recom(app.recommender, movie_title), app.movies, categories).head(10)
except Exception:
return render_template('error_title.html')
return render_template('movie_genre.html', name='Movie and genre recommendation for {}'.format(movie_title),
data=df)
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
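# Illustrative request against the running app (assumes the default Flask port
# and that the recommender parquet files exist; the title below is a guess):
#   curl -X POST -d "movie_title=Toy Story (1995)" http://localhost:5000/title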
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from setuptools import find_packages, setup
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
__import__("adminlte")
setup(
name='django-adminlte-x',
version='0.22.1',
packages=find_packages(),
include_package_data=True,
license='MIT License',
description='AdminLTE Bootstrap Theme packaged for Django',
long_description=open('README.md', 'r').read(),
url='https://github.com/dnaextrim/django_adminlte_x',
author='Dony Wahyu Isprananda',
author_email='dna.extrim@gmail.com',
classifiers=[
"Development Status :: 5 - Production/Stable",
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.9', # replace "X.Y" as appropriate
'Framework :: Django :: 1.10', # replace "X.Y" as appropriate
'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
# Replace these appropriately if you are stuck on Python 2.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
keywords='django bootstrap adminlte theme',
install_requires=[
"django-appconf>=0.6.0"
],
zip_safe=False,
)
|
from pypbbot import app, run_server, BaseDriver
from pypbbot.utils import sendBackClipsToAndWait, SingletonType
class SingletonDriver(BaseDriver, metaclass=SingletonType):
def onPrivateMessage(self, event):
message = event.raw_message
if message.startswith('#repr'):
sendBackClipsToAndWait(event, repr(self))
app.driver_builder = SingletonDriver
if __name__ == '__main__':
run_server(app='__main__:app', host='localhost', port=8082, reload=True)
|
__doc__ = """Deprecated.
"""
__author__ = "Rui Campos"
from scipy.interpolate import CubicSpline, RectBivariateSpline, BarycentricInterpolator, interp1d
from numpy import *
from numpy.random import rand
from scipy.integrate import *
class UnivariateDistribution:
def __init__(self, xAxis = [], yAxis = []):
xAxis, yAxis = map(array, (xAxis, yAxis))
self.xAxis = xAxis
self.yAxis = yAxis
normalization = trapz(yAxis, x = xAxis)
distribution = yAxis/normalization
cum = cumtrapz(distribution, x = xAxis, initial = 0)
self.cum = interp1d(xAxis, cum)
self.invCum = interp1d(cum, xAxis)
class Distribution: #RITA method(kind of)
def __init__(self, xAxis = [], yAxisCol = [], EAxis = [], cumMoments = False):
"""
Normalizes the distributions. Interpolates the probability distribution.
        Calculates and interpolates the inverse cumulative function on the provided grid 'xAxis'.
Usage:
> x = arange(0, 1, .1)
> energy = arange(0, 50)
> probList = [prob1, prob2, prob3, ..., prob50]
> dist = Distribution(xAxis = x, EAxis = energy, yAxisCol = probList)
This will perform all operations and store them in memory.
To get the distribution, one must update the state by giving an energy value:
> dist.update_dist(1e6)
This will perform aliasing according to a log interpolation in E and update
        the state, i.e. dist.prob and dist.invCum are available (and callable).
"""
#note: yCol = Collection of y's
assert len(EAxis) == len(yAxisCol)
assert len(xAxis) == len(yAxisCol[0])
#note: yAxis is an array of arrays with len(E)
EAxis, xAxis, yAxisCol = map(array, [EAxis, xAxis, yAxisCol])
self.EAxis = EAxis
self.xAxis = xAxis
self.yAxisCol = yAxisCol
adhocCS = [trapz(yAxis, x = xAxis) for yAxis in yAxisCol]
self.probCol = [yAxis/cs for cs, yAxis in zip(adhocCS, yAxisCol)]
self.probCol = array(self.probCol)
self.cumCol = [cumtrapz(prob, x = xAxis, initial = 0) for prob in self.probCol]
self.invCumCol = [interp1d(cum, xAxis) for cum in self.cumCol]
#can be improved, xAxis can be made denser if interpolation of prob is used
if cumMoments is True:
self.probCol_m1 = []
self.probCol_m2 = []
for prob in self.probCol:
self.probCol_m1.append(cumtrapz(prob*xAxis, x = xAxis, initial = 0))
self.probCol_m2.append(cumtrapz(prob*xAxis**2, x = xAxis, initial = 0))
self.T1Col = [interp1d(xAxis, m1) for m1 in self.probCol_m1]
self.T2Col = [interp1d(xAxis, m2) for m2 in self.probCol_m2]
self.cumMoments = cumMoments
def update_dist(self, E):
"""Update distributions according to energy."""
k = searchsorted(self.EAxis, E, side='left')
Ek, Ek_ = self.EAxis[k-1], self.EAxis[k]
pr = (log(Ek_) - log(E))/(log(Ek_) - log(Ek))
if rand() < pr: k -= 1
self.E = E
self.prob = self.probCol[k]
self.invCum = self.invCumCol[k]
if self.cumMoments is True:
self.T1 = self.T1Col[k]
self.T2 = self.T2Col[k]
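# A minimal usage sketch (not part of the original module): sample from a simple
# linear density on [0, 1] by pushing uniform deviates through the interpolated
# inverse cumulative function. The axis and density below are illustrative only.
if __name__ == '__main__':
    x = linspace(0.0, 1.0, 101)
    lin_dist = UnivariateDistribution(xAxis=x, yAxis=2.0 * x)  # normalized internally
    samples = lin_dist.invCum(rand(1000))  # inverse-CDF sampling
    print(samples.mean())  # expect roughly 2/3 for pdf(x) = 2x on [0, 1]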
|
"""Urls mappings for both UI views and REST api calls."""
from django.conf.urls import patterns, url
import django.contrib.auth.views
from spudblog import views
urlpatterns = patterns(
'',
# UI view urls
url(r'^$', django.contrib.auth.views.login,
{'template_name': 'spudblog/login.html'}, name='login'),
url(r'^blog-explorer/$', views.blog_explorer),
url(r'^my-blogs/$', views.my_blogs),
url(r'^logout/$', views.logout),
# API urls
url(r'^api/all/$', views.all),
# Allow for optional /blog_id or /post_id
    # Lack of such is useful for POST but not for PUT and DELETE
url(r'^api/full-blog/(?P<blog_id>\d+)$', views.full_blog),
url(r'^api/blog(/(?P<blog_id>\d*))?$', views.blog),
url(r'^api/post(/(?P<post_id>\d*))?$', views.post))
|
query = """
DROP TABLE IF EXISTS evidences;
CREATE TABLE IF NOT EXISTS evidences (
id serial PRIMARY KEY,
incident_id VARCHAR,
link VARCHAR,
FOREIGN KEY (incident_id) REFERENCES incidents (id)
);
"""
|
import json
import matplotlib.pyplot as plt
with open('new_cases.json', 'r+') as f:
new_cases = json.load(f)
COUNTRIES = ['India', 'Italy', 'Spain', 'South Korea']
growth_rate = {}
for country in COUNTRIES:
growth_rate[country] = []
report_no = 0
for country in COUNTRIES:
for report in range(len(new_cases[country])):
report_no = new_cases[country][report][0]
if report_no == 14:
growth_factor = 0
else:
if new_cases[country][report-1][2] == 0:
growth_factor = 0
else:
growth_factor = new_cases[country][report][2]/new_cases[country][report-1][2]
growth_rate[country].append([report_no, growth_factor])
for country in COUNTRIES:
plt.plot(
[item[0] for item in growth_rate[country]],
[item[1] for item in growth_rate[country]],
label=country
)
plt.hlines(1, 14, report_no, linestyles='dashed')
plt.legend()
plt.show()
|
import datetime
from tests import BaseTestCase
from redash import models
from factories import dashboard_factory, query_factory, data_source_factory, query_result_factory
from redash.utils import gen_query_hash
class DashboardTest(BaseTestCase):
def test_appends_suffix_to_slug_when_duplicate(self):
d1 = dashboard_factory.create()
self.assertEquals(d1.slug, 'test')
d2 = dashboard_factory.create(user=d1.user)
self.assertNotEquals(d1.slug, d2.slug)
d3 = dashboard_factory.create(user=d1.user)
self.assertNotEquals(d1.slug, d3.slug)
self.assertNotEquals(d2.slug, d3.slug)
class QueryTest(BaseTestCase):
def test_changing_query_text_changes_hash(self):
q = query_factory.create()
old_hash = q.query_hash
models.Query.update_instance(q.id, query="SELECT 2;")
q = models.Query.get_by_id(q.id)
self.assertNotEquals(old_hash, q.query_hash)
class QueryResultTest(BaseTestCase):
def setUp(self):
super(QueryResultTest, self).setUp()
def test_get_latest_returns_none_if_not_found(self):
ds = data_source_factory.create()
found_query_result = models.QueryResult.get_latest(ds, "SELECT 1", 60)
self.assertIsNone(found_query_result)
def test_get_latest_returns_when_found(self):
qr = query_result_factory.create()
found_query_result = models.QueryResult.get_latest(qr.data_source, qr.query, 60)
self.assertEqual(qr, found_query_result)
def test_get_latest_works_with_data_source_id(self):
qr = query_result_factory.create()
found_query_result = models.QueryResult.get_latest(qr.data_source.id, qr.query, 60)
self.assertEqual(qr, found_query_result)
def test_get_latest_doesnt_return_query_from_different_data_source(self):
qr = query_result_factory.create()
data_source = data_source_factory.create()
found_query_result = models.QueryResult.get_latest(data_source, qr.query, 60)
self.assertIsNone(found_query_result)
def test_get_latest_doesnt_return_if_ttl_expired(self):
yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
qr = query_result_factory.create(retrieved_at=yesterday)
found_query_result = models.QueryResult.get_latest(qr.data_source, qr.query, ttl=60)
self.assertIsNone(found_query_result)
def test_get_latest_returns_if_ttl_not_expired(self):
yesterday = datetime.datetime.now() - datetime.timedelta(seconds=30)
qr = query_result_factory.create(retrieved_at=yesterday)
found_query_result = models.QueryResult.get_latest(qr.data_source, qr.query, ttl=120)
self.assertEqual(found_query_result, qr)
def test_get_latest_returns_the_most_recent_result(self):
yesterday = datetime.datetime.now() - datetime.timedelta(seconds=30)
old_qr = query_result_factory.create(retrieved_at=yesterday)
qr = query_result_factory.create()
found_query_result = models.QueryResult.get_latest(qr.data_source, qr.query, 60)
self.assertEqual(found_query_result.id, qr.id)
def test_get_latest_returns_the_last_cached_result_for_negative_ttl(self):
yesterday = datetime.datetime.now() + datetime.timedelta(days=-100)
very_old = query_result_factory.create(retrieved_at=yesterday)
yesterday = datetime.datetime.now() + datetime.timedelta(days=-1)
qr = query_result_factory.create(retrieved_at=yesterday)
found_query_result = models.QueryResult.get_latest(qr.data_source, qr.query, -1)
self.assertEqual(found_query_result.id, qr.id)
class TestQueryResultStoreResult(BaseTestCase):
def setUp(self):
super(TestQueryResultStoreResult, self).setUp()
self.data_source = data_source_factory.create()
self.query = "SELECT 1"
self.query_hash = gen_query_hash(self.query)
self.runtime = 123
self.utcnow = datetime.datetime.utcnow()
self.data = "data"
def test_stores_the_result(self):
query_result = models.QueryResult.store_result(self.data_source.id, self.query_hash, self.query,
self.data, self.runtime, self.utcnow)
self.assertEqual(query_result.data, self.data)
self.assertEqual(query_result.runtime, self.runtime)
self.assertEqual(query_result.retrieved_at, self.utcnow)
self.assertEqual(query_result.query, self.query)
self.assertEqual(query_result.query_hash, self.query_hash)
self.assertEqual(query_result.data_source, self.data_source)
def test_updates_existing_queries(self):
query1 = query_factory.create(query=self.query, data_source=self.data_source)
query2 = query_factory.create(query=self.query, data_source=self.data_source)
query3 = query_factory.create(query=self.query, data_source=self.data_source)
query_result = models.QueryResult.store_result(self.data_source.id, self.query_hash, self.query, self.data,
self.runtime, self.utcnow)
self.assertEqual(models.Query.get_by_id(query1.id)._data['latest_query_data'], query_result.id)
self.assertEqual(models.Query.get_by_id(query2.id)._data['latest_query_data'], query_result.id)
self.assertEqual(models.Query.get_by_id(query3.id)._data['latest_query_data'], query_result.id)
def test_doesnt_update_queries_with_different_hash(self):
query1 = query_factory.create(query=self.query, data_source=self.data_source)
query2 = query_factory.create(query=self.query, data_source=self.data_source)
query3 = query_factory.create(query=self.query + "123", data_source=self.data_source)
query_result = models.QueryResult.store_result(self.data_source.id, self.query_hash, self.query, self.data,
self.runtime, self.utcnow)
self.assertEqual(models.Query.get_by_id(query1.id)._data['latest_query_data'], query_result.id)
self.assertEqual(models.Query.get_by_id(query2.id)._data['latest_query_data'], query_result.id)
self.assertNotEqual(models.Query.get_by_id(query3.id)._data['latest_query_data'], query_result.id)
def test_doesnt_update_queries_with_different_data_source(self):
query1 = query_factory.create(query=self.query, data_source=self.data_source)
query2 = query_factory.create(query=self.query, data_source=self.data_source)
query3 = query_factory.create(query=self.query, data_source=data_source_factory.create())
query_result = models.QueryResult.store_result(self.data_source.id, self.query_hash, self.query, self.data,
self.runtime, self.utcnow)
self.assertEqual(models.Query.get_by_id(query1.id)._data['latest_query_data'], query_result.id)
self.assertEqual(models.Query.get_by_id(query2.id)._data['latest_query_data'], query_result.id)
self.assertNotEqual(models.Query.get_by_id(query3.id)._data['latest_query_data'], query_result.id)
|
import colors as c
from types import SimpleNamespace as ns
from animation import Animation as anim
from hex_tile import hex_to_pixel
from gui import draw_box, draw_hex, draw_text
directions = ns(**{
    "TOP": 0,
    "LEFT": 1,
    "BOTTOM": 2,  # even values are vertical for the drtn % 2 checks below; 3/4 broke that parity
    "RIGHT": 3
})
def white_hex_blink(selected, loop=2, ms=200):
def step(percent, off=(0, 0)):
r, g, b = c.WHITE
pos, ci = selected
coords = hex_to_pixel(pos)
highlight = (r, g, b, 255 * percent)
bc = c.YELLOW
draw_hex(coords, color=highlight, bc=bc, off=off)
return anim(ms, step, loop)
def slide_in(pos, size, drtn, ss=(50, 50), ms=1000):
x1, y1 = [(0 - l) if drtn < 2 else (sl + l) for l,sl in zip(size,ss)]
Δx, Δy = [(a - b) for a, b in zip(pos,(x1, y1))]
x2, y2 = pos
vert = drtn % 2 == 0
def step(percent, off=(0, 0)):
x = (x1 + Δx * percent) if not vert else x2
y = (y1 + Δy * percent) if vert else y2
draw_box((x, y), size)
return anim(ms, step, stick=1)
def slide_out(pos, size, drtn, ss=(50, 50), ms=1000):
end = [(0 - l) if drtn < 2 else (sl + l) for l,sl in zip(size,ss)]
Δx, Δy = [(b - a) for a, b in zip(pos,end)]
x2, y2 = pos
vert = drtn % 2 == 0
def step(percent, off=(0, 0)):
x = (x2 + Δx * percent) if not vert else x2
y = (y2 + Δy * percent) if vert else y2
draw_box((x, y), size)
return anim(ms, step)
def grow(pos, size, ms=1000):
start = [(p + p + s) / 2 for p, s in zip(pos, size)]
Δ = [a - b for a, b in zip(start, pos)]
def step(percent, off=(0, 0)):
x, y = [a - b * percent for a, b in zip(start, Δ)]
w, h = [(l * percent) for l in size]
draw_box((x, y), (w, h))
return anim(ms, step, stick=1)
def shrink(pos, size, ms=1000):
start = [(p + p + s) / 2 for p, s in zip(pos, size)]
Δ = [a - b for a, b in zip(start, pos)]
def step(percent, off=(0, 0)):
x, y = [a + b * percent for a, b in zip(pos, Δ)]
w, h = [(l * (1 - percent)) for l in size]
draw_box((x, y), (w, h))
return anim(ms, step)
def modal(text="hello"):
draw_text(text)
|
import csv
import itertools
import re
import hashlib
from androidcrypto import constants
import base64
import random
import logging
import json
import numpy as np
import pandas as pd
import copy
from collections.abc import Iterable
def decolorize_source_code(src_code):
reaesc = re.compile(r'\x1b[^m]*m')
return reaesc.sub('', src_code)
def flatten_list(lst):
return [item for sublist in lst for item in sublist]
def hash_file(path):
hasher = hashlib.sha256()
with open(path, 'rb') as afile:
buf = afile.read(constants.BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(constants.BLOCKSIZE)
return hasher.hexdigest()
def decode_base64_line_of_code(line_of_code):
splt = line_of_code.split('Base64.decode("')
if len(splt) > 1:
return base64.decodebytes(str.encode(splt[1].split('\"')[0]))
else:
return None
def shuffle_csv(clean_csv_path, shuffled_csv_path):
with open(clean_csv_path, 'r') as handle:
data = handle.readlines()
header, rest = data[0], data[1:]
random.shuffle(rest)
with open(shuffled_csv_path, 'w') as handle:
handle.write(header)
for line in rest:
handle.write(line)
def get_buckets(n_samples, dex_date_start, dex_date_end):
n_buckets = dex_date_end - dex_date_start + 1
bucket_size = n_samples // n_buckets
remainder = n_samples % n_buckets
return {key: bucket_size for key in range(dex_date_start, dex_date_end + 1)}, remainder
def filter_androzoo_dataset(n_samples, csv_path, dex_date_start, dex_date_end, min_vt_treshold, max_vt_treshold, max_size, strategy):
if strategy != constants.DOWNLOAD_STRATEGY_UNIFORM:
return None
samples_of_interest = []
buckets, remainder = get_buckets(n_samples, dex_date_start, dex_date_end)
for year, _ in zip(buckets.keys(), range(remainder, 0, -1)):
buckets[year] += 1
with open(csv_path, 'r') as csv_handle:
reader = csv.DictReader(csv_handle)
sampled = 0
while sampled < n_samples:
try:
row = next(reader)
except StopIteration:
logging.warning(f'While you wished to get {n_samples} samples from Androzoo, the filtered dataset contains only {sampled} samples.')
return samples_of_interest
virus_total = int(row['vt_detection']) if row['vt_detection'] != '' else 0
dex_date = int(row['dex_date'].split('-')[0]) if row['dex_date'].split('-')[0] != '' else 0
file_size = int(row['apk_size'])
if (min_vt_treshold <= virus_total <= max_vt_treshold) and buckets.get(dex_date, 0) > 0 and file_size < max_size:
samples_of_interest.append(row)
sampled += 1
buckets[dex_date] -= 1
return samples_of_interest
def androzoo_parse_year(dex_date):
return int(dex_date.split('-')[0]) if dex_date.split('-')[0] != '' else 0
def get_androzoo_url(sha256, api_key):
return 'https://androzoo.uni.lu/api/download?apikey=' + api_key + '&sha256=' + sha256
def merge_jsons(*args):
def merge_into_globalset(new_candidates):
new_ids = set(new_candidates.keys())
collisions = globalset_ids.intersection(new_ids)
globalset_ids.update(new_ids)
if collisions:
logging.warning(f'Collisions, introduced duplicates: {collisions}')
# we want to keep the oldest sample, thus we must start adding from the oldest
for sample_id in new_ids.difference(collisions):
            global_data[sample_id] = new_candidates[sample_id]  # the argument, not the outer loop variable
globalset_ids = set()
global_data = {}
for arg in args:
if isinstance(arg, str):
with open(arg, 'r') as handle:
new_dict = json.load(handle)
elif isinstance(arg, dict):
new_dict = arg
else:
logging.warning('Trying to merge jsons where one argument is neither path or dictionary.')
new_dict = {}
merge_into_globalset(new_dict)
return global_data
def introduce_missing_vals(df, cols):
    def replace_empty(x):
        # str is Iterable, so the original str branch was unreachable; check
        # None and str first and fall through to returning x unchanged.
        if x is None:
            return np.nan
        if isinstance(x, str):
            return x if len(x) != 0 else np.nan
        if isinstance(x, Iterable):
            return x if len(x) != 0 else np.nan
        return x
if not cols:
return df
new_df = copy.deepcopy(df)
for c in cols:
new_df[c] = new_df[c].apply(replace_empty)
return new_df
def drop_adware(df):
return df.loc[df.euphony_type != 'adware']
def print_delim(stream):
print('-' * 80, file=stream)
def get_third_party_libs_df(df, category):
data = []
for index, row in enumerate(df.itertuples()):
lst = getattr(row, category)
if isinstance(lst, list):
data.extend([(row.Index, row.year, x) for x in lst])
return pd.DataFrame(data, columns=['sha256', 'year', 'lib_name'])
def get_col_values_grouped_by_year_normalized(df, old_col_name, new_col_name, norm_factor, dropna=False):
if dropna is True:
series = df.dropna(subset=[old_col_name]).groupby('year')[old_col_name].value_counts()
else:
series = df.groupby('year')[old_col_name].value_counts()
norm_name = 'normalized_' + new_col_name
freq_df = pd.DataFrame(series).rename(columns={old_col_name: new_col_name}).reset_index()
freq_df[new_col_name] = freq_df[new_col_name].astype('float64')
freq_df[norm_name] = freq_df[new_col_name] / freq_df['year'].map(norm_factor)
freq_df[norm_name] = freq_df[norm_name].astype('int64')
return freq_df
def flatten(dct, include_obfuscated=False):
lst = [] if not include_obfuscated else [dct['obfuscated']]
for key in dct.keys():
if key != 'obfuscated':
lst.extend(dct[key])
return lst
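# Illustrative check of get_buckets (not in the original module): ten samples
# over three dex-date years split into buckets of three with one left over.
if __name__ == '__main__':
    buckets, remainder = get_buckets(10, 2018, 2020)
    assert buckets == {2018: 3, 2019: 3, 2020: 3} and remainder == 1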
|
#!"F:\\Python\\pythonw.exe"
print("Content-Type: text/html")
print()
import cgi
form = cgi.FieldStorage()
ac=form.getvalue('account')
#ac="655580033"
def main():
import pyodbc
conn=pyodbc.connect('Driver={SQL Server};''Server=suresh-pc;''Database=bank')
amount=int(form.getvalue('withdraw'))
otp=form.getvalue('otp')
otp1=form.getvalue('otp1')
b=conn.execute('select balance,name from bank where accountno=?',(ac))
for z in b:
pass
y=z[1]
if otp==otp1:
z=int(z[0])-int(amount)
b=conn.execute('select top 1 id from statment order by id desc')
for c in b:
ke=int(c[0])
ke=ke+1
if(z>=500):
conn.execute('update bank set balance=? where accountno=?',(z,ac))
conn.execute('insert into statment (id,accountno,currentbalance,debalance)values (?,?,?,?)',(ke,ac,z,amount))
conn.commit()
conn.close()
print("""
<html>
<head>
<title>Suresh Bank</title>
<script>
setTimeout(function()
{
window.location.href='account.py?ac=%s&c=%s'
},0);
alert('done')
</script>
</head>
<body>
</body>
</html>"""%(ac,y))
else:
print("""
<html>
<head>
<title>Suresh Bank</title>
<script>
setTimeout(function()
{
window.location.href='account.py?ac=%s&c=%s'
},0);
    alert('insufficient balance')
</script>
</head>
<body onload="setTimeout()">
</body>
</html>"""%(ac,y))
else:
print("""
<html>
<head>
<title>Suresh Bank</title>
<script>
setTimeout(function()
{
window.location.href='account.py?ac=%s&c=%s'
},0);
alert('wrong otp')
</script>
</head>
<body onload="setTimeout()">
</body>
</html>"""%(ac,y))
main()
|
#!python3
# Mark Bykerk Kauffman.
# Parses command line args and uses VideoLinkCreator to create a link to a URL for a video.
# Sample Use: python create_link.py --learnfqdn "kauffman380011.ddns.net" --key "1237d7ee-80ab-123c-afc5-123ddd7c31bc" --secret "kL1239NnNk0s1234UUm0muL19Xmt1234" --course_id "uuid:fc005cb3865a486981f221bd24111007" --video_url "https://www.microsoft.com" --title "Microsoft Home" --description "A link to Microsoft"
import argparse
import datetime
from panopto_oauth2 import PanoptoOAuth2
from panopto_uploader import PanoptoUploader
from video_link_creator import VideoLinkCreator
import time
import urllib3
def parse_argument():
'''
Argument definition and handling.
'''
parser = argparse.ArgumentParser(description='Create content at the root of an Ultra course that is a URL.')
parser.add_argument('--learnfqdn', dest='learnfqdn', required=True, help='Learn Server name as FQDN')
parser.add_argument('--key', dest='key', required=True, help='Registered REST API Key')
parser.add_argument('--secret', dest='secret', required=True, help='Registered REST API Secret')
parser.add_argument('--course_id', dest='course_id', required=True, help='courseId where we create the content. courseId:<id>|uuid:<uuid>|pk1')
parser.add_argument('--video_url', dest='video_url', required=True, help='The https link to the video.')
parser.add_argument('--title', dest='title', required=True, help='Title for the content of the link.')
parser.add_argument('--description', dest='description', required=True, help='Description for the link.')
return parser.parse_args()
def main():
'''
Main method
'''
args = parse_argument()
print("current date and time is..")
localtime = time.asctime(time.localtime(time.time()))
print(localtime)
link_creator = VideoLinkCreator(args.learnfqdn, args.key, args.secret, args.course_id, args.video_url, args.title, args.description)
link_creator.create_video_link()
print("current date and time is..")
localtime = time.asctime(time.localtime(time.time()))
print(localtime)
if __name__ == '__main__':
main()
|
# Contiguous Array
# Given a binary array, find the maximum length of a contiguous subarray with equal number of 0 and 1.
# Example 1:
# Input: [0,1]
# Output: 2
# Explanation: [0, 1] is the longest contiguous subarray with equal number of 0 and 1.
# Example 2:
# Input: [0,1,0]
# Output: 2
# Explanation: [0, 1] (or [1, 0]) is a longest contiguous subarray with equal number of 0 and 1.
# Note: The length of the given binary array will not exceed 50,000
class Solution(object):
def findMaxLength(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
count = 0
        max_length = 0
table = {0: 0}
for index, num in enumerate(nums, 1):
if num == 0:
count -= 1
else:
count += 1
if count in table:
max_length = max(max_length, index - table[count])
else:
table[count] = index
return max_length
# Better than 90%. I think this is good enough. Yes.
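# Quick sanity checks (illustrative):
#   Solution().findMaxLength([0, 1, 0])           ->  2
#   Solution().findMaxLength([0, 0, 1, 0, 1, 1])  ->  6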
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# Display whether the given year is a leap year
year = 2020
if year % 400 == 0:
    print(f'{year} is a leap year')
elif year % 4 == 0 and year % 100 != 0:
    print(f'{year} is a leap year')
else:
    print(f'{year} is not a leap year')
# In[ ]:
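# A reusable variant of the check above (illustrative, not in the original
# notebook): the two leap branches collapse into one boolean expression.
def is_leap(year):
    return year % 400 == 0 or (year % 4 == 0 and year % 100 != 0)

assert is_leap(2020) and is_leap(2000) and not is_leap(1900)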
|
# %%
#caesar cipher
class his_cip:
def __init__(self):
self.alf = "abcdefghijklmnopqrstuvwxyz"
self.alf2 = self.alf*2
self.len_ = len(self.alf)
def caesar_enc(self,plaintext,key):
ciphertext = ""
        # lower-case the input; dot handling lives in the commented-out variant below
        plaintext = plaintext.lower()
        # check that plaintext is an alphabetic string and key is an integer
if isinstance(plaintext,str) and isinstance(key,int) and plaintext.isalpha():
key = key % self.len_
for letter in plaintext:
# #if there is a space
# if ord(letter)==32:
# ciphertext += letter
# else:
ciphertext += self.alf2[self.alf2.index(letter)+key]
else:
print("ERRORE -> solo lettere (A-Z)")
return
return ciphertext
def caesar_dec(self,ciphertext,key):
plaintext = ""
if isinstance(ciphertext,str) and isinstance(key,int):
key = key % self.len_
for letter in ciphertext:
# #if there is a space
# if ord(letter)==32:
# plaintext += letter
# else:
plaintext += self.alf2[self.alf2.index(letter)-key]
return plaintext
#encryption with numbers first try
# def caesar_enc2(self,plaintext,key):
# ciphertext = ""
# vet=[0,1,2,3,4,5,6,7,8,9]
# if isinstance(plaintext,str) and isinstance(key,int):
# key = key%26
# plaintext = plaintext.replace(".","").lower()
# for letter in plaintext:
# #if there is a space
# if ord(letter)==32:
# ciphertext += letter
# elif ord(letter)>=48 and ord(letter)<=57:
# c=(((ord(letter)%48)+key))+48
# ciphertext += chr(c)
# else:
# #97 is the unicode position for "a"
# c=(((ord(letter)%97)+key))+97
# ciphertext += chr(c)
# return ciphertext
# def caesar_dec2(self,ciphertext,key):
# plaintext = ""
# vet=[0,1,2,3,4,5,6,7,8,9]
# if isinstance(ciphertext,str) and isinstance(key,int):
# key = key%26
# for letter in ciphertext:
# if ord(letter)==32:
# plaintext += letter
# elif ord(letter)>=48 and ord(letter)<=57:
# c=(((ord(letter)%48)-key))+48
# plaintext += chr(c)
# else:
# #97 is the unicode position for "a"
# c=(((ord(letter)%97)-key))+97
# plaintext += chr(c)
# return plaintext
def vigenere_enc(self,PlainText,key):
lenPlainText = len(PlainText)
lenKey = len(key)
x = int(lenPlainText/lenKey)
keyMatched = (key*(x+1))[0:(lenPlainText)]
cipherText = ""
for x in range(len(keyMatched)):
letterPlain = PlainText[x]
letterKey = keyMatched[x]
cipherLetter = self.caesar_enc(letterPlain,ord(letterKey)%self.len_)
cipherText += cipherLetter
return cipherText
def vigenere_dec(self,cipherText,key):
lenCipherText = len(cipherText)
lenKey = len(key)
x = int(lenCipherText/lenKey)
keymatched = (key*(x+1))[0:(lenCipherText)]
PlainText = ""
for x in range(len(keymatched)):
letterCipher = cipherText[x]
letterKey = keymatched[x]
plainLetter = self.caesar_dec(letterCipher,ord(letterKey)%self.len_)
PlainText += plainLetter
return PlainText
#%%
a=his_cip()
plain="porcodio"
key="lemon"
cipher = a.vigenere_enc(plain,key)
print(cipher)
plaint = a.vigenere_dec(cipher,key)
print(plaint)
# %%
|
from typing import Dict
from pyspark.sql import SparkSession, Column, DataFrame
# noinspection PyUnresolvedReferences
from pyspark.sql.functions import col, regexp_replace, substring
from pyspark.sql.functions import coalesce, lit, to_date
from spark_auto_mapper.automappers.automapper import AutoMapper
from spark_auto_mapper.helpers.automapper_helpers import AutoMapperHelpers as A
from spark_auto_mapper_fhir.complex_types.human_name import HumanName
from spark_auto_mapper_fhir.fhir_types.id import FhirId
from spark_auto_mapper_fhir.fhir_types.list import FhirList
from spark_auto_mapper_fhir.resources.patient import Patient
from spark_auto_mapper_fhir.value_sets.administrative_gender import (
AdministrativeGenderCode,
)
from spark_auto_mapper_fhir.value_sets.name_use import NameUseCode
def test_auto_mapper_fhir_patient_resource(spark_session: SparkSession) -> None:
# Arrange
spark_session.createDataFrame(
[
(1, "Qureshi", "Imran", "1970-01-01", "female"),
(2, "Vidal", "Michael", "1970-02-02", None),
],
["member_id", "last_name", "first_name", "date_of_birth", "my_gender"],
).createOrReplaceTempView("patients")
source_df: DataFrame = spark_session.table("patients")
df = source_df.select("member_id")
df.createOrReplaceTempView("members")
# Act
mapper = AutoMapper(
view="members", source_view="patients", keys=["member_id"]
).complex(
Patient(
id_=FhirId(A.column("member_id")),
birthDate=A.date(A.column("date_of_birth")),
name=FhirList(
[HumanName(use=NameUseCode("usual"), family=A.column("last_name"))]
),
gender=A.if_not_null(
A.column("my_gender"), AdministrativeGenderCode(A.column("my_gender"))
),
)
)
assert isinstance(mapper, AutoMapper)
sql_expressions: Dict[str, Column] = mapper.get_column_specs(source_df=source_df)
for column_name, sql_expression in sql_expressions.items():
print(f"{column_name}: {sql_expression}")
result_df: DataFrame = mapper.transform(df=df)
# Assert
assert len(sql_expressions) == 5
assert str(sql_expressions["id"]) == str(
substring(regexp_replace(col("b.member_id"), r"[^A-Za-z0-9\-\.]", "-"), 0, 63)
.cast("string")
.alias("id")
)
assert str(sql_expressions["resourceType"]) == str(
lit("Patient").cast("string").alias("resourceType")
)
assert str(sql_expressions["birthDate"]) == str(
coalesce(
to_date(col("b.date_of_birth"), "y-M-d"),
to_date(col("b.date_of_birth"), "yyyyMMdd"),
to_date(col("b.date_of_birth"), "M/d/y"),
)
.cast("date")
.alias("birthDate")
)
# assert str(sql_expressions["name"]) == str(
# filter(
# array(
# struct(
# lit("usual").alias("use"),
# col("b.last_name").alias("family"),
# )
# ), lambda x: x.isNotNull()
# ).alias("name")
# )
# assert str(sql_expressions["gender"]) == str(
# when(col("b.my_gender").isNull(),
# None).otherwise(col("b.my_gender")).alias("gender")
# )
result_df.printSchema()
result_df.show()
assert (
result_df.where("member_id == 1").selectExpr("name[0].use").collect()[0][0]
== "usual"
)
assert (
result_df.where("member_id == 1").selectExpr("name[0].family").collect()[0][0]
== "Qureshi"
)
assert (
result_df.where("member_id == 2").selectExpr("name[0].use").collect()[0][0]
== "usual"
)
assert (
result_df.where("member_id == 2").selectExpr("name[0].family").collect()[0][0]
== "Vidal"
)
|
from database import session
from models.ratingsModel import Rating
from models.newsModel import News
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
class GraphController(object):
def getRatingsGraph(self, nid: int):
ratings = session.query(Rating.rating).where(Rating.news_id == nid).order_by(Rating.date).all()
count = len(ratings)
title = session.query(News.title).where(News.id == nid).one()
graphTitle = f"Ratings for \"{title[0]}\""
listed = list(zip(*ratings))
ts = np.array(listed[0])
avg = np.mean(ts)
if ratings[count - 1].rating - ratings[0].rating > avg:
graphTitle += " (it's a trend!)"
delta = 0.2
fig = plt.figure()
axes = fig.add_subplot(111)
axes.set_title(graphTitle)
axes.set_xlabel("Count")
axes.set_ylabel("Value")
axes.axhline(avg, label="Trend boundary", linestyle="--", color="red")
axes.set_ylim(np.min(ts) - delta, np.max(ts) + delta)
axes.plot(np.arange(1, count + 1).astype(str), ts, linestyle="-", color="black", label="Rating")
axes.legend()
plt.show()
def getTopTagsGraph(self):
results = session.execute("SELECT name, avg FROM "
"(SELECT tag_id, avg(rating) FROM news_tags "
"INNER JOIN news n ON n.id = news_tags.news_id "
"GROUP BY tag_id "
"LIMIT 10) AS t "
"INNER JOIN tags ON tags.id = t.tag_id "
"ORDER BY avg DESC").all()
listed = list(zip(*results))
series = pd.Series(np.array(listed[1]), index=listed[0], name='')
series.plot.pie(figsize=(9, 7), title="Top 10 Rated Tags:")
plt.show()
def getTrendingTags(self):
results = session.execute("SELECT name, (avg / count) AS k FROM "
"(SELECT tag_id, count(tag_id) FROM news_tags "
"GROUP BY tag_id) AS t1 "
"INNER JOIN "
"(SELECT tag_id, name, avg FROM "
"(SELECT tag_id, avg(rating) FROM news_tags "
"INNER JOIN news n ON n.id = news_tags.news_id "
"GROUP BY tag_id) AS t "
"INNER JOIN tags ON tags.id = t.tag_id) AS t2 "
"ON t1.tag_id = t2.tag_id "
"ORDER BY k DESC "
"LIMIT 10").all()
listed = list(zip(*results))
k = np.array([math.log10(item * 1000) for item in listed[1]])
avg = k.mean()
df = pd.DataFrame({'coefficient': k}, index=listed[0])
df.plot.bar(figsize=(9, 7), title="Top 10 Tags By Trend Coefficient")
plt.axhline(avg, label="Trend boundary", linestyle="--", color="red")
plt.legend()
plt.show()
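# Illustrative usage (assumes the database session is configured and that a
# news item with id 1 has ratings; each call opens an interactive plot window).
if __name__ == '__main__':
    controller = GraphController()
    controller.getRatingsGraph(1)
    controller.getTopTagsGraph()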
|
import asyncio
from typing import Callable
from typing import Dict
from typing import List
class HandlersHolder:
def __init__(self):
self._on_event_update: Dict[str, List[Callable]] = {
'RAW_UPDATE_HANDLER': [],
'STREAM_END_HANDLER': [],
'INVITE_HANDLER': [],
'KICK_HANDLER': [],
'CLOSED_HANDLER': [],
'LEFT_HANDLER': [],
'PARTICIPANTS_LIST': [],
}
async def propagate(
self,
event_name: str,
*args,
**kwargs,
):
for event in self._on_event_update[event_name]:
asyncio.ensure_future(event(*args, **kwargs))
def add_handler(
self,
event_name: str,
func: Callable,
):
self._on_event_update[event_name].append(func)
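# A minimal usage sketch (illustrative): register a coroutine for one of the
# predefined event names and fan an update out to it.
if __name__ == '__main__':
    async def on_stream_end(chat_id):
        print(f'stream ended in {chat_id}')

    holder = HandlersHolder()
    holder.add_handler('STREAM_END_HANDLER', on_stream_end)

    async def demo():
        await holder.propagate('STREAM_END_HANDLER', -100123)
        await asyncio.sleep(0)  # yield so the scheduled handler task runs

    asyncio.run(demo())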
|
# -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2021 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Facilitate Tanium report to NIST OSCAL json transformation."""
import datetime
import logging
import traceback
import uuid
from typing import Any, Dict, List, Union, ValuesView
from trestle.oscal.assessment_results import ControlSelection
from trestle.oscal.assessment_results import LocalDefinitions1
from trestle.oscal.assessment_results import Observation
from trestle.oscal.assessment_results import Result
from trestle.oscal.assessment_results import ReviewedControls
from trestle.oscal.assessment_results import Status1
from trestle.oscal.assessment_results import SystemComponent
from trestle.oscal.common import ImplementedComponent, InventoryItem, Property, SubjectReference
logger = logging.getLogger(__name__)
t_analysis = Dict[str, Any]
t_component = SystemComponent
t_component_ref = str
t_computer_name = str
t_control = str
t_control_selection = ControlSelection
t_inventory = InventoryItem
t_inventory_ref = str
t_local_definitions = LocalDefinitions1
t_observation = Observation
t_oscal = Union[str, Dict[str, Any]]
t_tanium_collection = Any
t_tanium_row = Dict[str, Any]
t_timestamp = str
t_resource = Dict[str, Any]
t_result = Result
t_reviewed_controls = ReviewedControls
t_component_map = Dict[t_component_ref, t_component]
t_inventory_map = Dict[t_computer_name, t_inventory]
t_observation_list = List[Observation]
class RuleUse():
"""Represents one row of Tanium data."""
def __init__(self, tanium_row: t_tanium_row, comply, default_timestamp: t_timestamp) -> None:
"""Initialize given specified args."""
logger.debug(f'tanium-row: {tanium_row}')
try:
# level 1 keys
self.computer_name = tanium_row['Computer Name']
self.tanium_client_ip_address = tanium_row['Tanium Client IP Address']
self.ip_address = str(tanium_row['IP Address'])
self.count = str(tanium_row['Count'])
# comply keys
self.check_id = comply['Check ID']
self.rule_id = comply['Rule ID']
self.state = comply['State']
#
self.check_id_level = '[no results]'
self.check_id_version = '[no results]'
self.check_id_benchmark = '[no results]'
self.component = '[no results]'
self.component_type = '[no results]'
#
if ';' in self.check_id:
items = self.check_id.split(';')
if len(items) > 2:
self.check_id_level = items[2]
if len(items) > 1:
self.check_id_version = items[1]
if len(items) > 0:
self.check_id_benchmark = items[0]
self.component = items[0]
if self.component.startswith('CIS '):
self.component = self.component[len('CIS '):]
if self.component.endswith(' Benchmark'):
self.component = self.component[:-len(' Benchmark')]
self.component_type = 'Operating System'
#
self.timestamp = comply.get('Timestamp', default_timestamp)
#
self.collected = default_timestamp
except Exception as e:
logger.debug(f'tanium-row: {tanium_row}')
logger.debug(e)
logger.debug(traceback.format_exc())
raise e
return
class ResultsMgr():
"""Represents collection of data to transformed into an AssessmentResult.results."""
# the current time for consistent timestamping
timestamp = datetime.datetime.utcnow().replace(microsecond=0).replace(tzinfo=datetime.timezone.utc).isoformat()
@staticmethod
def set_timestamp(value: str) -> None:
"""Set the default timestamp value."""
datetime.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S%z')
ResultsMgr.timestamp = value
@staticmethod
def get_timestamp() -> str:
"""Get the default timestamp value."""
return ResultsMgr.timestamp
def __init__(self) -> None:
"""Initialize."""
self.observation_list: t_observation_list = []
self.component_map: t_component_map = {}
self.inventory_map: t_inventory_map = {}
self.ns = 'https://ibm.github.io/compliance-trestle/schemas/oscal/ar/tanium'
# track ip-address to computer-name
self.map_ip_address_to_computer_name = {}
# list of controls
self.control_list = []
    @property
    def controls(self) -> List[t_control]:
        """OSCAL controls."""
        return sorted(self.control_list)
    @property
    def components(self) -> List[t_component]:
        """OSCAL components."""
        return list(self.component_map.values())
@property
def inventory(self) -> ValuesView[InventoryItem]:
"""OSCAL inventory."""
return self.inventory_map.values()
@property
def observations(self) -> List[t_observation]:
"""OSCAL observations."""
return self.observation_list
@property
def control_selections(self) -> List[t_control_selection]:
"""OSCAL control selections."""
prop = []
prop.append(ControlSelection())
return prop
@property
def local_definitions(self) -> t_local_definitions:
"""OSCAL local definitions."""
prop = LocalDefinitions1()
prop.components = self.components
prop.inventory_items = list(self.inventory)
return prop
@property
def reviewed_controls(self) -> t_reviewed_controls:
"""OSCAL reviewed controls."""
prop = ReviewedControls(control_selections=self.control_selections)
return prop
@property
def result(self) -> t_result:
"""OSCAL result."""
prop = Result(
uuid=str(uuid.uuid4()),
title='Tanium',
description='Tanium',
start=ResultsMgr.timestamp,
end=ResultsMgr.timestamp,
reviewed_controls=self.reviewed_controls,
local_definitions=self.local_definitions,
observations=self.observations
)
return prop
@property
def analysis(self) -> List[str]:
"""OSCAL statistics."""
logger.debug(f'controls: {self.controls}')
analysis = []
analysis.append(f'inventory: {len(self.inventory)}')
analysis.append(f'observations: {len(self.observations)}')
return analysis
def _get_inventory_ref(self, rule_use: RuleUse) -> t_inventory_ref:
"""Get inventory reference for specified rule use."""
return self.inventory_map[rule_use.tanium_client_ip_address].uuid
def _component_extract(self, rule_use: RuleUse) -> None:
"""Extract component from Tanium row."""
component_type = rule_use.component_type
component_title = rule_use.component
component_description = rule_use.component
for component in self.component_map.values():
if component.type == component_type:
if component.title == component_title:
if component.description == component_description:
return
component_ref = str(uuid.uuid4())
status = Status1(state='operational')
component = SystemComponent(
uuid=component_ref,
type=component_type,
title=component_title,
description=component_description,
status=status
)
self.component_map[component_ref] = component
def _get_component_ref(self, rule_use: RuleUse) -> t_component_ref:
"""Get component reference for specified rule use."""
uuid = None
component_type = rule_use.component_type
component_title = rule_use.component
component_description = rule_use.component
for component_ref, component in self.component_map.items():
if component.type == component_type:
if component.title == component_title:
if component.description == component_description:
uuid = component_ref
break
return uuid
def _inventory_extract(self, rule_use: RuleUse) -> None:
"""Extract inventory from Tanium row."""
if rule_use.tanium_client_ip_address in self.inventory_map:
pass
else:
inventory = InventoryItem(uuid=str(uuid.uuid4()), description='inventory')
props = []
props.append(Property(name='Computer_Name', value=rule_use.computer_name, ns=self.ns))
props.append(
Property(
name='Tanium_Client_IP_Address',
value=rule_use.tanium_client_ip_address,
ns=self.ns,
class_='scc_inventory_item_id'
)
)
props.append(Property(name='IP_Address', value=rule_use.ip_address, ns=self.ns))
props.append(Property(name='Count', value=rule_use.count, ns=self.ns))
inventory.props = props
inventory.implemented_components = [ImplementedComponent(component_uuid=self._get_component_ref(rule_use))]
self.inventory_map[rule_use.tanium_client_ip_address] = inventory
def _observation_extract(self, rule_use: RuleUse) -> None:
"""Extract observation from Tanium row."""
observation = Observation(
uuid=str(uuid.uuid4()),
description=rule_use.rule_id,
methods=['TEST-AUTOMATED'],
collected=rule_use.collected
)
subject_reference = SubjectReference(subject_uuid=self._get_inventory_ref(rule_use), type='inventory-item')
observation.subjects = [subject_reference]
props = [
Property(name='Check_ID', value=rule_use.check_id, ns=self.ns),
Property(
name='Check_ID_Benchmark',
value=rule_use.check_id_benchmark,
ns=self.ns,
class_='scc_predefined_profile'
),
Property(
name='Check_ID_Version',
value=rule_use.check_id_version,
ns=self.ns,
class_='scc_predefined_profile_version'
),
Property(name='Check_ID_Level', value=rule_use.check_id_level, ns=self.ns),
Property(name='Rule_ID', value=rule_use.rule_id, ns=self.ns, class_='scc_goal_description'),
Property(name='Rule_ID', value=rule_use.rule_id, ns=self.ns, class_='scc_check_name_id'),
Property(name='State', value=rule_use.state, ns=self.ns, class_='scc_result'),
Property(name='Timestamp', value=rule_use.timestamp, ns=self.ns, class_='scc_timestamp'),
]
observation.props = props
self.observation_list.append(observation)
rule_use.observation = observation
def _process(self, rule_use: RuleUse) -> None:
self._component_extract(rule_use)
self._inventory_extract(rule_use)
self._observation_extract(rule_use)
    def ingest(self, tanium_row: t_tanium_row) -> None:
        """Process one row of Tanium."""
        # Find the 'Comply' column explicitly; the original loop fell through
        # to the last key when none matched.
        comply_key = next(
            (key for key in tanium_row.keys() if key.startswith('Comply')), None
        )
        if comply_key is None:
            return
        for comply in tanium_row[comply_key]:
            rule_use = RuleUse(tanium_row, comply, ResultsMgr.timestamp)
            self._process(rule_use)
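# A hedged end-to-end sketch (not part of the module): the row layout below is
# a guess at the Tanium export shape, limited to the keys RuleUse and ingest
# actually read.
if __name__ == '__main__':
    mgr = ResultsMgr()
    mgr.ingest({
        'Computer Name': 'host-1',
        'Tanium Client IP Address': '10.0.0.1',
        'IP Address': '10.0.0.1',
        'Count': '1',
        'Comply - Compliance Findings': [{
            'Check ID': 'CIS Ubuntu Benchmark;v1.0.0;Level 1',
            'Rule ID': 'xccdf_org.example_rule_1',
            'State': 'pass',
        }],
    })
    print(mgr.analysis)  # e.g. ['inventory: 1', 'observations: 1']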
|
import torch
import torch.nn as nn
import torch_tensorrt.fx.tracer.acc_tracer.acc_ops as acc_ops
from parameterized import parameterized
from torch.testing._internal.common_fx2trt import AccTestCase
from torch.testing._internal.common_utils import run_tests
class TestConverter(AccTestCase):
@parameterized.expand(
[
("2d_dim", "ij,jk->ik", (2, 3), (3, 4)),
("2d_dim_ext", "ij,kj->ik", (2, 3), (4, 3)),
("3d_dim", "cxd,cyd->cxy", (3, 4, 5), (3, 6, 5)),
("4d_dim", "bcwd,bcdh->bcwh", (2, 3, 4, 5), (2, 3, 5, 6)),
("4d_dim_ext", "bcxd,bcyd->bcxy", (2, 3, 4, 5), (2, 3, 6, 5)),
# TRT does not support ellipsis or diagonal operations
]
)
def test_einsum(self, _, equation, x_size, y_size):
class Einsum(nn.Module):
def forward(self, x, y):
return torch.einsum(equation, x, y)
inputs = [torch.randn(*x_size), torch.randn(*y_size)]
self.run_test(
Einsum(),
inputs,
expected_ops={acc_ops.einsum},
test_implicit_batch_dim=False,
)
if __name__ == "__main__":
run_tests()
|
#! /usr/bin/python
"""Module for Matches.
This module builds the Match base class with validation
for its fields. When validation is stronger than simple type
validation, the Mark class is used in place of the traditional str or int classes to
track accuracy.
Example:
    >>> match = CollegeMatch(**kwargs)
"""
from datetime import datetime, time
from collections import Counter
from typing import Optional, Dict, Tuple, Union
from urllib.parse import quote
import attr
from attr.validators import instance_of
from wrestling import base
from wrestling.events import Event
from wrestling.scoring import CollegeScoring, HSScoring
from wrestling.sequence import isvalid_sequence
from wrestling.wrestlers import Wrestler
@attr.s(slots=True, order=True, eq=True, kw_only=True, auto_attribs=True)
class Match(object):
"""Match base class.
Args:
id (str): Match id.
base_url (Optional[Union[str, None]]): Url to prepend to 'id', default to None.
event (Event): Event instance for the event the match occurred at.
date (datetime): Datetime the match occurred at.
result (Result): Result of the match.
overtime ([Optional[bool]]): If the match went into overtime, default to False.
focus (Wrestler): Wrestler instance for the primary wrestler.
opponent (Wrestler): Wrestler instance for the opponent.
weight (Mark): Weight class the match was contested at.
isvalid (bool): Whether the match is valid or has errors.
invalid_messages (tuple): Tuple of (brief) match error messages, can be empty.
invalid_count (int): Count of invalid Marks found in the match.
Raises:
ValueError: Overtime cannot be True if Result method is Tech.
"""
_id: str = attr.ib(validator=instance_of(str), repr=False, order=False)
# enter at your own risk
base_url: Optional[Union[str, None]] = attr.ib(
default=None, repr=False, order=False
)
event: Event = attr.ib(
validator=instance_of(Event), repr=lambda x: x.name, order=False
)
date: Union[str, datetime] = attr.ib(validator=instance_of((datetime, str)), order=True, repr=False)
result: base.Result = attr.ib(
validator=instance_of(base.Result), order=False, repr=lambda x: x.text
)
overtime: Optional[bool] = attr.ib(
validator=instance_of(bool), order=False, repr=False, default=False
)
focus: Wrestler = attr.ib(
validator=instance_of(Wrestler), order=False, repr=lambda x: x.name
)
opponent: Wrestler = attr.ib(
validator=instance_of(Wrestler), order=False, repr=lambda x: x.name
)
_weight: base.Mark = attr.ib(validator=instance_of(base.Mark), repr=lambda x: x.tag)
isvalid: bool = attr.ib(init=False, repr=False, order=False, eq=False)
invalid_messages: Tuple = attr.ib(
init=False, factory=tuple, repr=False, order=False, eq=False
)
invalid_count: int = attr.ib(init=False, repr=False, order=False, eq=False)
def __attrs_post_init__(self):
"""Post init function to call Mark input handlers."""
self.check_weight_input()
self.isvalid = self.set_validity()
@overtime.validator
def check_overtime(self, attribute, value):
"""Checks overtime validity."""
if self.result == base.Result.WT or self.result == base.Result.LT:
if value: # if overtime is True
raise ValueError(f"Overtime must be false if match resulted in Tech.")
@property
def weight(self) -> str:
"""Weight class match contested at.
Returns:
str: Weight.
"""
return str(self._weight.tag)
def check_weight_input(self):
"""Function to manage validity of 'kind' input attribute via Mark class."""
if not self._weight.tag.isdigit():
message = (
f"Invalid weight value, expected a number, "
f"got {self._weight.tag}."
)
self._weight.isvalid = False
self._weight.msg = message
@property
def video_url(self) -> Union[str, None]:
"""Video url: concats base_url and id.
Returns:
str: video_url
"""
return f"{self.base_url}/{quote(self._id)}" if self.base_url else None
@property
def focus_pts(self) -> int:
"""Number of points the primary wrestler scored.
Returns:
int: Focus points scored
"""
return self.calculate_pts("f")
@property
def opp_pts(self):
"""Number of points the opponent wrestler scored.
Returns:
int: Opponent points scored
"""
return self.calculate_pts("o")
@property
def mov(self) -> int:
"""Margin of Victory.
Returns:
int: Difference between focus_points and opponent_points
"""
return self.focus_pts - self.opp_pts
@property
def td_diff(self) -> int:
"""Takedown differential.
Returns:
int: Difference in primary wrestler takedowns and opponent takedowns
"""
counts = Counter((score.formatted_label for score in self.time_series))
return counts.get('fT2', 0) - counts.get('oT2', 0)
def set_validity(self) -> bool:
"""Identifies instance validity status.
This method returns boolean and is used in the attrs post_init hook to set the
instance 'isvalid' attribute. However, this method also sets the instance
'invalid_messages' and 'invalid_count' attributes according to the errors it
detects when searching the instance.
Any errors detected are input as brief descriptors into the
'invalid_messages' attribute of the instance.
The 'invalid_counts' is simply a count of how many errors were discovered.
Returns:
bool: True if all Marks are valid, else False (if any Marks are invalid).
"""
messages = []
status = True
if isinstance(self._weight, base.Mark) and not self._weight.isvalid:
messages.append("Invalid weight class.")
status = False
if not all((score.label.isvalid for score in getattr(self, "time_series"))):
messages.append("Invalid time-series label.")
status = False
if isinstance(self.event._kind, base.Mark) and not self.event._kind.isvalid:
messages.append("Invalid event type.")
status = False
self.invalid_messages = tuple(messages)
self.invalid_count = len(messages)
return status
def calculate_pts(self, athlete_filter: str) -> int:
"""Calculate total points scored.
Args:
athlete_filter: 'f' or 'o' to filter by 'focus' or 'opponent'
Returns:
int: Sum of points scored.
"""
return sum(
(
action.label.point_value
for action in getattr(self, "time_series")
if action.formatted_label.startswith(athlete_filter)
)
)
def to_dict(
self, ts_only: Optional[bool] = False, results_only: Optional[bool] = False
) -> Union[Dict, Tuple]:
"""Converts instance to dict representation.
Args:
ts_only: If you only want the time_series of the instance. Defaults to False.
results_only: If you only want the results of the instance. Defaults to False.
Returns:
Dict[str, Union[str, int]]: Dictionary of instance values.
"""
if ts_only:
ts = tuple(
dict(
x.to_dict(),
**dict(
focus_name=getattr(self, "focus").name,
opp_name=getattr(self, "opponent").name,
event_name=getattr(self, "event").name,
),
)
for x in getattr(self, "time_series")
)
return ts
elif results_only:
result = getattr(self, "result").text
binary, method = result.split()
return dict(binary=binary, method=method)
else:
return dict(
focus_name=getattr(self, "focus").name,
focus_team=getattr(self, "focus").team,
opp_name=getattr(self, "opponent").name,
opp_team=getattr(self, "opponent").team,
weight=getattr(self, "weight"),
event_name=getattr(self, "event").name,
event_type=getattr(self, "event").kind,
date=str(getattr(self, 'date')),
text_result=getattr(self, "result").text,
num_result=getattr(self, "result").value,
duration=getattr(self, "duration"),
overtime=getattr(self, "overtime"),
video=getattr(self, "video_url"),
win=getattr(self, "result").win,
bonus=getattr(self, "result").bonus,
pin=getattr(self, "result").pin,
team_pts=getattr(self, "result").team_points,
focus_pts=getattr(self, "focus_pts"),
opp_pts=getattr(self, "opp_pts"),
mov=getattr(self, "mov"),
td_diff=getattr(self, "td_diff"),
)
@attr.s(slots=True, order=True, eq=True, kw_only=True, auto_attribs=True)
class CollegeMatch(Match):
"""Match for college ruleset.
Args:
duration (Optional[int]): Length of match, defaults to 420.
time_series (Tuple([CollegeScoring])): sequence of scoring events.
Raises:
TypeError: All items in time_series must be CollegeScoring instances.
ValueError: time_series must be sorted chronologically.
"""
duration: Optional[int] = attr.ib(default=420, validator=instance_of(int))
# auto sorts (based on time)
time_series: Tuple[CollegeScoring] = attr.ib(
validator=instance_of(Tuple), order=False, repr=lambda x: f"{len(x)} actions"
)
def __attrs_post_init__(self):
"""Post init function to call Mark input handlers from super-class."""
Match.__attrs_post_init__(self)
self.add_college_ts_points()
@time_series.validator
def check_time_series(self, attribute, value):
"""Validates that all time_series are of the correct type and in the correct order."""
if not all(isinstance(event, CollegeScoring) for event in value):
raise TypeError(
f"All of the items in the `time_series` set must be "
f"`CollegeScoring` objects."
)
if not isvalid_sequence("college", value):
raise ValueError(f"Time series sequence appears invalid...")
def add_college_ts_points(self):
for i, score in enumerate(self.time_series):
if i == 0:
if score.formatted_label.startswith('f'):
self.time_series[i].focus_score = self.time_series[i].label.point_value
self.time_series[i].opp_score = 0
elif score.formatted_label.startswith('o'):
self.time_series[i].focus_score = 0
self.time_series[i].opp_score = self.time_series[i].label.point_value
continue
if score.formatted_label.startswith('f'):
self.time_series[i].focus_score = self.time_series[i].label.point_value + \
self.time_series[i - 1].focus_score
self.time_series[i].opp_score = self.time_series[i - 1].opp_score
elif score.formatted_label.startswith('o'):
self.time_series[i].focus_score = self.time_series[i - 1].focus_score
self.time_series[i].opp_score = self.time_series[i].label.point_value + self.time_series[i - 1].opp_score
else:
raise ValueError(
f"Invalid `formatted_label`, expected startswith = 'o' or 'f', "
f"got {score.formatted_label!r}")
ts = list(self.time_series)
ts.insert(
0,
CollegeScoring(
time_stamp=str(time(hour=0, minute=0, second=0)),
initiator='red',
focus_color='red',
period=1,
label=base.CollegeLabel('START')
)
)
self.time_series = tuple(ts)
return True
@attr.s(slots=True, order=True, eq=True, kw_only=True, auto_attribs=True)
class HSMatch(Match):
"""Match for college ruleset.
Args:
duration (Optional[int]): Length of match, defaults to 360.
time_series (Tuple([HSScoring])): sequence of scoring events.
Raises:
TypeError: All items in time_series must be HSScoring instances.
ValueError: time_series must be sorted chronologically.
"""
duration: Optional[int] = attr.ib(default=360, validator=instance_of(int))
# auto sorts (based on time)
    time_series: Tuple[HSScoring] = attr.ib(
        validator=instance_of(Tuple), order=False, repr=lambda x: f"{len(x)} actions"
    )
def __attrs_post_init__(self):
"""Post init function to call Mark input handlers from super-class."""
Match.__attrs_post_init__(self)
self.add_hs_ts_points()
@time_series.validator
def check_time_series(self, attribute, value):
"""Validates that all time_series are of the correct type and in the correct order."""
if not all(isinstance(event, HSScoring) for event in value):
raise TypeError(
f"All of the items in the `time_series` set must be "
f"`HighSchoolScoring` objects."
)
if not isvalid_sequence("high school", value):
raise ValueError(f"Time series sequence appears invalid...")
def add_hs_ts_points(self):
for i, score in enumerate(self.time_series):
if i == 0:
if score.formatted_label.startswith('f'):
self.time_series[i].focus_score = self.time_series[i].label.point_value
self.time_series[i].opp_score = 0
elif score.formatted_label.startswith('o'):
self.time_series[i].focus_score = 0
self.time_series[i].opp_score = self.time_series[i].label.point_value
continue
if score.formatted_label.startswith('f'):
self.time_series[i].focus_score = self.time_series[i].label.point_value + \
self.time_series[i - 1].focus_score
self.time_series[i].opp_score = self.time_series[i - 1].opp_score
elif score.formatted_label.startswith('o'):
self.time_series[i].focus_score = self.time_series[i - 1].focus_score
self.time_series[i].opp_score = self.time_series[i].label.point_value + self.time_series[i - 1].opp_score
else:
raise ValueError(
f"Invalid `formatted_label`, expected startswith = 'o' or 'f', "
f"got {score.formatted_label!r}")
ts = list(self.time_series)
ts.insert(
0,
HSScoring(
time_stamp=str(time(hour=0, minute=0, second=0)),
initiator='red',
focus_color='red',
period=1,
label=base.HSLabel('START')
)
)
self.time_series = tuple(ts)
return True
|
from typing import List
from pydantic import BaseModel
class Butaca(BaseModel):
idButaca: int
idSala: int
Sala: str
Cantidad: int
class Config:
orm_mode = True
|
def progress_level_percentage(percentage):
""" Progess percentage util.
- 0-33 red
- 34-66 yellow
- 67-100 green
"""
_percentage = int(percentage)
if 0 < _percentage <= 33:
level = 'danger'
elif 34 <= _percentage <= 66:
level = 'warning'
elif _percentage >= 67:
level = 'success'
else:
level = 'none'
return level
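if __name__ == '__main__':
    # Quick self-check sketch (added) of the threshold mapping above.
    assert progress_level_percentage(20) == 'danger'
    assert progress_level_percentage(50) == 'warning'
    assert progress_level_percentage('90') == 'success'  # int() accepts numeric strings
    assert progress_level_percentage(0) == 'none'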
|
import logging
from dbnd._core.constants import ApacheBeamClusterType, CloudType, EnvLabel
from dbnd._core.errors import friendly_error
from dbnd._core.parameter.parameter_builder import parameter
from dbnd._core.parameter.parameter_definition import ParameterScope
from dbnd._core.task.config import Config
from targets import DirTarget
logger = logging.getLogger(__name__)
task_env_param = parameter(scope=ParameterScope.children)
class EnvConfig(Config):
"""Databand's environment configuration"""
_conf__task_family = "env"
cloud_type = parameter(description="cloud type: gcp/aws/")[str]
env_label = parameter(
default=EnvLabel.dev, description="environment type: dev/int/prod"
)[
str
] # label
production = parameter(
description="indicates that environment is production"
).value(False)
conn_id = parameter(default=None, description="cloud connection settings")[str]
# MAIN OUTPUT FOLDER
root = parameter.folder[DirTarget]
# DATABAND SYSTEM FOLDERS
dbnd_root = parameter(description="DBND rooted home folder").output.folder(
default=None
)[DirTarget]
dbnd_local_root = parameter(
description="DBND home for the local engine environment"
).output.folder()[DirTarget]
dbnd_data_sync_root = parameter(
description="Rooted directory for target syncing against remote engine"
).output.folder()[DirTarget]
# execution
local_engine = parameter(
default="local_machine_engine", description="Engine for local execution"
)[str]
remote_engine = parameter(
description="Remote engine for driver/tasks execution"
).none[str]
submit_driver = parameter(description="Submit driver to remote_engine").none[bool]
submit_tasks = parameter(
description="Submit tasks to remote engine one by one"
).none[bool]
# properties that will affect "task-env" section
spark_config = task_env_param.help("Spark Configuration").value("spark")
spark_engine = task_env_param.help(
"Cluster engine (local/emr(aws)/dataproc(gcp)/.."
).value("spark_local")
hdfs = task_env_param.help("Hdfs cluster config").value("hdfs_knox")
beam_config = task_env_param.help("Apache Beam configuration").value("beam")
beam_engine = task_env_param.help(
"Apache Beam cluster engine (local/dataflow)"
).value(ApacheBeamClusterType.local)
docker_engine = task_env_param.help("Docker job engine (docker/aws_batch)").value(
"docker"
)
def _initialize(self):
super(EnvConfig, self)._initialize()
self.dbnd_root = self.dbnd_root or self.root.folder("dbnd")
if not self.dbnd_local_root:
if not self.dbnd_root.is_local():
raise friendly_error.config.dbnd_root_local_not_defined(self.name)
self.dbnd_local_root = self.dbnd_root
if not self.dbnd_data_sync_root:
self.dbnd_data_sync_root = self.dbnd_root.folder("sync")
if self.submit_driver is None:
self.submit_driver = bool(self.remote_engine)
if self.submit_tasks is None:
self.submit_tasks = bool(self.remote_engine)
@property
def name(self):
return self.task_meta.task_name
@property
def cloud_type(self):
return self.task_meta.task_family
def prepare_env(self):
pass
class LocalEnvConfig(EnvConfig):
"""
Local environment configuration section
"""
_conf__task_family = CloudType.local
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Warning :The Hard Way Is Easier
import random
import string
from typing import Optional
"""
1. How can we randomly pick among the 26 letters, upper and lower case? If each
letter is given a different weight, how do we pick a letter according to weight?
ASCII codes for lowercase letters: 97-122 (the range end 123 is exclusive)
"""
print(string.ascii_letters)    # all letters, lower and upper case
print(string.ascii_lowercase)  # lowercase letters only
print(string.ascii_uppercase)  # uppercase letters only
# chr(int) returns the Unicode character for a code point
char_list = [chr(i) for i in range(97, 123)]
print(random.choice(char_list))
print(random.sample(char_list, 5))
# Weighted-selection solution
weight_data = {'a': 10, 'b': 15, 'c': 50}
def random_weight(origin: dict) -> Optional[str]:
    total = sum(origin.values())
    # random.uniform(a, b) returns a + (b-a) * random(), i.e. a float in [0, total]
    selected_num = random.uniform(0, total)
    # selected_num = random.randint(0, total)
    sum_weight = 0
    for key in origin.keys():
        sum_weight += origin[key]
        # >= is the right test: the random number fell inside this key's band
        if sum_weight >= selected_num:
            return key
    return None
print(random_weight(weight_data))
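# Note (added): since Python 3.6 the standard library can do weighted picks
# directly; this line is equivalent to random_weight(weight_data):
print(random.choices(list(weight_data), weights=list(weight_data.values()), k=1)[0])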
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import math
# Create subplot
fig, ax = plt.subplots(1)
# Create circle
theta = np.linspace(0, 2*np.pi, 100)
r = 250.0
x1 = r*np.cos(theta)
x2 = r*np.sin(theta)
# Plot the circle
ax.plot(x1, x2)
ax.set_aspect(1)
# Configure grid
plt.xlim(-300.00,300.00)
plt.ylim(-300.00,300.00)
plt.grid(linestyle='--')
plt.xlabel('X')
plt.ylabel('Z')
# Adding points
xy1,xy2 = (-100, 0.0), (0.0,0.0)
# Create vector to translate
#ax.plot(xy1, xy2, color='r', lw=2)
#ax.scatter(xy1[0],xy1[1], color='r')
#ax.scatter(xy2[0],xy2[1], color='r')
#ax.annotate('x', (-110,-35.0) )
#ax.annotate('x0', (-10.0,-35.0) )
#ax.annotate('r', (-50.0,15.0), color='r')
# Save file
#plt.savefig("fig_01.png", bbox_inches='tight')
# Circular arc
ax.annotate('r', (-80.0,250.0), color='r')
a = matplotlib.patches.Arc((0, 0), 500, 500, 0, 90, 90+(100/250)*90, color='red', lw=2, zorder=5)
ax.add_patch(a)
# Create vector to translate
x_ = 250 * math.sin( 100/250 * (math.pi/2) )
ax.plot((-1*x_,0.0), xy2, color='orange', lw=2)
ax.annotate('r\'', (-75.0,15.0), color='orange')
ax.plot( (-1*x_,0.0), (200.0,0.0), color='black', lw=1 )
ax.plot( (0.0,0.0), (250.0,0.0), color='black', lw=1 )
b = matplotlib.patches.Arc((0, 0), 150, 150, 0, 90, 90+(100/250)*90, color='black', lw=1, zorder=5)
ax.annotate(r'$\theta$', (-40.0,100.0), color='black')
ax.add_patch(b)
#ax.plot((-100.0,-0.0), (-3.0,-3.0), color='red', lw=2)
#ax.annotate('r', (-75.0,-35.0), color='red')
# Save file
plt.savefig("fig_02.png", bbox_inches='tight')
#plt.savefig("fig_03.png", bbox_inches='tight')
#plt.show()
|
# Generated by Django 3.1.2 on 2020-10-27 20:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pokemonfans', '0013_pokemon_type_1'),
]
operations = [
migrations.AddField(
model_name='pokemon',
name='description',
field=models.TextField(default='henlo!', help_text="Some kind of description for the Pokémon. This could be composed of Pokédex entries, the Pokémon's biology etc."),
preserve_default=False,
),
]
|
import mysql.connector
from mysql.connector import Error
#Open database connection with a dictionary
conDict = {'host':'localhost',
'database':'game_records',
'user':'root',
'password':''}
db = mysql.connector.connect(**conDict)
#preparing cursor
cursor = db.cursor()
#executing SQL query
myInsertText = "INSERT INTO tbl_game_score VALUES (12341,'Me,'Easy',10,50)"
cursor.execute(myInsertText)
#Commit
db.commit()
print(cursor.rowcount,"Record Added")
#Disconnect
db.close()
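# Safer variant (added sketch, left commented out since the connection is already
# closed above): let the connector escape values via placeholders instead of
# hand-building the SQL string.
#   insert_sql = "INSERT INTO tbl_game_score VALUES (%s, %s, %s, %s, %s)"
#   cursor.execute(insert_sql, (12341, 'Me', 'Easy', 10, 50))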
|
"""
Connection manager for DynamoDB
"""
import boto3
from botocore.exceptions import ClientError
import logging.config
import os
import constants
import settings
LOGGING_CONFIG = os.path.join(settings.ROOT_DIR, 'logging.conf')
logging.config.fileConfig(LOGGING_CONFIG)
log = logging.getLogger()
class DynamoDBConn():
def init_db(self, db_url, table_name, region):
self.db = boto3.resource('dynamodb', region, endpoint_url=db_url)
self.table_name = table_name
def get_all_listings(self):
"""
Return all db listings
Future versions will support paging, offsets, and search parameters
:return: dict
"""
table = self.db.Table(self.table_name)
response = table.scan()
return response['Items']
def get_listing(self, payload):
"""
Return a listing from the database
:param payload: dict
:return: dict
"""
try:
table = self.db.Table(self.table_name)
response = table.get_item(
Key={
'listing_hash': payload['listing_hash']
}
)
if 'Item' in response:
return response['Item']
else:
return constants.ITEM_NOT_FOUND
except Exception as exc:
            log.error(exc)
return constants.DB_ERROR
def add_listing(self, payload):
"""
Add a listing to the table
:param payload: dict
:return: string
"""
try:
table = self.db.Table(self.table_name)
response = table.put_item(
Item=payload
)
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
return constants.DB_SUCCESS
else:
return constants.DB_ERROR
except ClientError as exc:
if exc.response['Error']['Code'] == 'ValidationException':
log.critical(exc)
return 'ValidationException'
# TODO: Add 'data' array to db schema
else:
raise
dynamo_conn = DynamoDBConn()
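# Usage sketch (added; the endpoint, region and table name are placeholder values,
# not settings taken from this project):
#   dynamo_conn.init_db(db_url='http://localhost:8000',
#                       table_name='listings', region='us-east-1')
#   print(dynamo_conn.get_all_listings())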
|
from pathlib import Path
from urllib.parse import urlparse
from bs4 import BeautifulSoup
from confluence_client import ConfluenceClient
class ConfluenceContent:
def __init__(self, html):
self.html_soup = BeautifulSoup(html, 'lxml')
def process_images(self, page_html_path: Path, confluence_page_id: str, confluence: ConfluenceClient):
images = self.html_soup.select('img')
for image in images:
src = image.attrs['src']
if urlparse(src).netloc == '':
source_file_path = page_html_path.parent.joinpath(src).resolve()
attachment = confluence.upload_image(page_id=confluence_page_id,
file_to_upload=source_file_path,
name=source_file_path.name)
if 'results' in attachment:
attachment_name = attachment['results'][0]['title']
else:
attachment_name = attachment['title']
confluence_image = self.html_soup.new_tag(nsprefix="ac", name="image")
confluence_image_attachment = self.html_soup.new_tag(nsprefix="ri", name="attachment")
confluence_image_attachment.attrs["ri:filename"] = attachment_name
confluence_image.append(confluence_image_attachment)
image.replace_with(confluence_image)
def process_links(self, page_html_path: Path, html_root: Path, page_map: dict):
anchors = self.html_soup.select('a[href]')
for anchor in anchors:
href = anchor.attrs['href']
if urlparse(href).netloc == '':
target_path = page_html_path.parent.joinpath(href).resolve()
source_file_path = target_path.with_suffix('.md').relative_to(html_root)
if source_file_path in page_map:
mapped_page = page_map[source_file_path]
anchor.attrs['href'] = f'https://docs.osi.dataport.de{mapped_page["_links"]["webui"]}'
def get_html(self):
return str(self.html_soup)
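# Usage sketch (added; the paths, page id and client construction are hypothetical):
#   content = ConfluenceContent(Path('docs/page.html').read_text())
#   content.process_images(page_html_path=Path('docs/page.html'),
#                          confluence_page_id='123456',
#                          confluence=ConfluenceClient(...))
#   updated_html = content.get_html()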
|
"""
@author: Justin Pusztay
"""
# In this file we will only worry about 'NN' simulations
# In this file we only start from empty with the states.
"""
Average is multiplying states of two nodes and dividing by number of timesteps
"""
import Cayley as cy
import Cayley.research as cr
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
def main():
# Set up for simulation
generations = 3
links = 3
alpha = 0.5
beta = 0.2
gamma = 0.2
mu = 0.2
r1 = 0.5
r2 = 0.5
trials = 50
k = 12
J = 10
timesteps = 10
b = cy.CayleyTree(generations,links)
monte = cy.MonteCarlo(b, alpha, beta, gamma, mu, r1, r2)
monte.startEmpty()
# Runs one trial
# # # # # # # # # # # # #
for x in range(timesteps):
monte.simulateNN()
# # # # # # # # # # # # #
# Generates statistics about trial
a = cr.density(b,monte)
cr.final_state(monte)
c = cr.density_generations(b,monte)
# Densities per timestep plot
plt.plot(a.values())
plt.ylabel('density')
plt.xlabel('timestep')
plt.show()
# Final state graphics
G = nx.Graph()
color_map = list()
for node in b:
if monte.previousState(node) == 0:
color_map.append('green')
else:
color_map.append('red')
G.add_nodes_from(b)
G.add_edges_from(b.linksAsTuples())
nx.draw(G,node_color = color_map,with_labels = True)
plt.show()
generation = 0
for x in c.values():
plt.plot(x.values(),label = "Generation "+str(generation))
generation += 1
plt.ylabel("Generational Density")
plt.xlabel("timestep")
plt.legend()
plt.show()
if __name__ == "__main__":
main()
|
'''
Make a Python program, which can validate if the input string is a valid Finnish id number.
You must calculate that the checksum is correct. Also output the birthdate of the person in format day.month.year
and tell the gender (male/female) See https://fi.wikipedia.org/wiki/Henkil%C3%B6tunnus
'''
import re
check_table = {0: '0', 1: '1', 2: '2', 3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8',
9: '9', 10: 'A', 11: 'B', 12: 'C', 13: 'D', 14: 'E', 15: 'F', 16: 'H',
17: 'J', 18: 'K', 19: 'L', 20: 'M', 21: 'N', 22: 'P', 23: 'R', 24: 'S',
25: 'T', 26: 'U', 27: 'V', 28: 'W', 29: 'X', 30: 'Y'}
while True:
id_number = input('Give Finnish id number: ')
    check_valid_first_step = re.match(r'^[0-9]{6}[-+Aa][0-9]{3}[0-9A-Za-z]$', id_number)
    if not check_valid_first_step:
        print('Not valid id - input is missing something or something is misspelled')
continue
else:
#Collect information from id number for further validation
separate_id = id_number.split(id_number[6])
day_month_year = re.findall('..', separate_id[0])
id_end = re.findall('.{1,3}', separate_id[1])
individual_number = id_end[0]
check_character = id_end[1].upper()
birthdate_individual_number = int(separate_id[0] + individual_number)
        individual_number = int(individual_number)  # convert to number for testing purposes
        check_number = birthdate_individual_number % 31
if check_table[check_number] != check_character:
print('Not valid id - identity code is wrong')
continue
#Is male or female?
if individual_number % 2 == 0:
gender = 'female'
else :
gender = 'male'
#Born in 18 or 19 or 20 century?
if id_number[6] == '+':
century = '18'
elif id_number[6] == '-':
century = '19'
else:
century = '20'
day = day_month_year[0]
month = day_month_year[1]
year = century + day_month_year[2]
        print(day, month, year, sep='.')
print('Gender: ', gender)
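        # Worked checksum example (added): for the id 131052-308T from the Wikipedia
        # article linked above, 131052308 % 31 == 25 and check_table[25] == 'T',
        # so the check character matches.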
|
"""
test for the psurl module.
"""
import unittest
import pscheduler
from base_test import PschedTestBase
from pscheduler.psurl import *
class TestPsurl(PschedTestBase):
"""
URL tests.
"""
def test_url_bad(self):
"""IP addr tests"""
# Missing scheme
no_scheme = "no-scheme"
(status, _) = url_get(no_scheme, json=False, throw=False)
self.assertEqual(status, 400)
self.assertRaises(pscheduler.URLException, url_get, no_scheme, json=False, throw=True)
# Bad IPv6 address
bad6 = "http://dead:beef::1bad:cafe/"
(status, _) = url_get(bad6, json=False, throw=False)
self.assertEqual(status, 400)
self.assertRaises(pscheduler.URLException, url_get, bad6, json=False, throw=True)
# This address is in the blocks reserved for RFC6666 discards.
discard = "https://[0100::0010]/"
self.assertEqual(
url_get(discard, timeout=1, json=False, throw=False)[0], 400)
self.assertRaises(pscheduler.URLException, url_get, discard, json=False, timeout=1, throw=True)
def test_url_get(self):
# TODO: Would need a web server to test this
pass
def test_url_get_bind(self):
"""See if binding works"""
self.assertEqual(
url_get("http://127.0.0.1", bind="192.0.2.1", throw=False),
(400, 'bind failed with errno 99: Cannot assign requested address')
)
def test_url_put(self):
# TODO: Would need a web server to test this
pass
def test_url_post(self):
# TODO: Would need a web server to test this
pass
def test_url_delete(self):
# TODO: Would need a web server to test this
pass
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
import sys
from concurrent.futures import as_completed
from concurrent.futures.thread import ThreadPoolExecutor
from multiprocessing import cpu_count, get_context
from typing import Any, Callable, Dict, List
from django.conf import settings
def batch_call(
func: Callable,
params_list: List[Dict[str, Any]],
handle_func_result: Callable = lambda x: x,
expand_result: bool = False,
) -> List:
"""
多线程处理函数
:param func: 逻辑函数
:param params_list: 参数列表
:param handle_func_result: 处理逻辑函数返回结果
:param expand_result: 是否通过expand整合返回结果
:return:
"""
func_result_list = []
with ThreadPoolExecutor(max_workers=settings.CONCURRENT_NUMBER) as ex:
tasks = [ex.submit(func, **params) for params in params_list]
for future in as_completed(tasks):
if expand_result:
func_result_list.extend(handle_func_result(future.result()))
else:
func_result_list.append(handle_func_result(future.result()))
return func_result_list
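# Example usage sketch (added; ``fetch_page`` and its parameters are hypothetical,
# and settings.CONCURRENT_NUMBER must be defined in the Django settings):
#   results = batch_call(
#       func=fetch_page,
#       params_list=[{"url": u, "timeout": 5} for u in urls],
#   )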
def batch_call_multi_proc(
func: Callable,
params_list: List[Dict[str, Any]],
handle_func_result: Callable = lambda x: x,
expand_result: bool = False,
) -> List:
"""
多进程处理函数
TODO 暂无法处理MySQL多进程链接
:param func: 逻辑函数
:param params_list: 参数列表
:param handle_func_result: 处理逻辑函数返回结果
:param expand_result: 是否通过expand整合返回结果
:return:
"""
if sys.platform in ["win32", "cygwim", "msys"]:
return batch_call(func, params_list, handle_func_result, expand_result)
else:
ctx = get_context("fork")
func_result_list = []
pool = ctx.Pool(processes=cpu_count())
futures = [pool.apply_async(func=func, kwds=params) for params in params_list]
pool.close()
pool.join()
        # collect the results
for future in futures:
if expand_result:
func_result_list.extend(handle_func_result(future.get()))
else:
func_result_list.append(handle_func_result(future.get()))
return func_result_list
|
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.urls import reverse
def home(request):
if request.user.is_authenticated:
return HttpResponseRedirect(reverse("operations:operation_list"))
    # check whether the user is authenticated; if so, redirect to convention/index...
return render(request, "index.html")
|
import logging
from time import sleep
from r7insight import R7InsightHandler
# add your token from insightops here
token = ''
region = 'EU'
log = logging.getLogger('r7insight')
log.setLevel(logging.INFO)
test = R7InsightHandler(token, region)
log.addHandler(test)
log.warning("Warning message")
log.info("Info message")
sleep(10)
|
import datetime
import string
import random
import json
import boto3
from prettytable import PrettyTable # imported from Lambda layer
# PrettyTable
TABLE = PrettyTable(['Region', 'MonthlySpendLimit ($)', 'SMSMonthToDateSpentUSD'])
START_TIME = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
END_TIME = datetime.datetime.utcnow()
REGIONS = ['us-east-2', 'us-east-1', 'us-west-1', 'us-west-2', 'ap-south-1', 'ap-southeast-1', 'ap-southeast-2', 'ap-northeast-1',
'ca-central-1', 'eu-central-1', 'eu-west-1', 'eu-west-2', 'eu-west-3', 'eu-north-1', 'me-south-1', 'sa-east-1', 'us-gov-west-1']
# Hardcoded to include us-gov-west-1 region
# Regions from: https://docs.aws.amazon.com/sns/latest/dg/sns-supported-regions-countries.html
def get_sms_cost(region):
"""Get SMS Metrics"""
# AWS SDK Clients
cloudwatch_client = boto3.client('cloudwatch', region_name=region)
sns_client = boto3.client('sns', region_name=region)
cw_id = random.choice(string.ascii_lowercase) + random.choice(string.ascii_uppercase)
try:
cw_response = cloudwatch_client.get_metric_data(
MetricDataQueries=[
{
'Id': cw_id,
'MetricStat': {
'Metric': {
'Namespace': 'AWS/SNS',
'MetricName': 'SMSMonthToDateSpentUSD'
},
'Period': 300,
'Stat': 'Maximum'
},
'Label': 'SMSMonthToDateSpentUSD',
'ReturnData': True,
},
],
EndTime=END_TIME,
StartTime=START_TIME
)
cw_value = f"${cw_response['MetricDataResults'][0]['Values'][0]}"
except IndexError:
cw_value = '$0'
except Exception as exp:
cw_value = f'Account not configured for {region}'
# Check MonthlySpendLimit
try:
sns_response = sns_client.get_sms_attributes()
sns_value = f"${sns_response['attributes']['MonthlySpendLimit']}"
except Exception as exp:
if region in ['us-gov-west-1', 'me-south-1']:
sns_value = f'Account not configured for {region}'
else:
sns_value = 'Default: $1'
TABLE.add_row([region, sns_value, cw_value])
def lambda_handler(event, context):
"""Main Function"""
for region in REGIONS:
get_sms_cost(region)
print(TABLE)
return {
'statusCode': 200,
'body': json.dumps('Check CloudWatch logs.')
}
|
import pytest
def pytest_runtest_setup(item):
r"""Our tests will often run in headless virtual environments. For this
reason, we enforce the use of matplotlib's robust Agg backend, because it
does not require a graphical display.
This avoids errors such as:
c:\hostedtoolcache\windows\python\3.7.5\x64\lib\tkinter\__init__.py:2023: TclError
This probably means that tk wasn't installed properly.
"""
import matplotlib
matplotlib.use("Agg")
# Add a marker @pytest.mark.memtest
# - used to mark tests that stress memory, typically done by limiting the memory Python can use
# - thus they should be run in isolation.
#
# - skipped by default
# - tests marked as such can be run by "-m memtest" option
def pytest_configure(config):
config.addinivalue_line(
"markers", "memtest: mark memory usage tests that need to be run in isolation"
)
def pytest_collection_modifyitems(config, items):
keywordexpr = config.option.keyword
markexpr = config.option.markexpr
if keywordexpr or markexpr:
return # let pytest handle this
skip_memtest = pytest.mark.skip(reason='memtest skipped, need -m memtest option to run')
for item in items:
if 'memtest' in item.keywords:
item.add_marker(skip_memtest)
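# Example (added): run only the isolated memory tests with
#   pytest -m memtest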
|
from bideox.settings.base import *
DEBUG = False
ALLOWED_HOSTS = ['*']
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import http
from django.core.urlresolvers import reverse
from mox import IsA
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
INDEX_URL = reverse('horizon:project:images_and_snapshots:index')
class VolumeSnapshotsViewTests(test.TestCase):
@test.create_stubs({quotas: ('tenant_quota_usages',)})
def test_create_snapshot_get(self):
volume = self.volumes.first()
usage = {'gigabytes': {'available': 250},
'snapshots': {'available': 6}}
quotas.tenant_quota_usages(IsA(http.HttpRequest)).AndReturn(usage)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:create_snapshot',
args=[volume.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'project/volumes/create_snapshot.html')
@test.create_stubs({cinder: ('volume_snapshot_create',)})
def test_create_snapshot_post(self):
volume = self.volumes.first()
snapshot = self.volume_snapshots.first()
cinder.volume_snapshot_create(IsA(http.HttpRequest),
volume.id,
snapshot.display_name,
snapshot.display_description) \
.AndReturn(snapshot)
self.mox.ReplayAll()
formData = {'method': 'CreateSnapshotForm',
'tenant_id': self.tenant.id,
'volume_id': volume.id,
'name': snapshot.display_name,
'description': snapshot.display_description}
url = reverse('horizon:project:volumes:create_snapshot',
args=[volume.id])
res = self.client.post(url, formData)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.glance: ('image_list_detailed',
'snapshot_list_detailed'),
api.cinder: ('volume_snapshot_list',
'volume_snapshot_delete')})
def test_delete_volume_snapshot(self):
vol_snapshots = self.volume_snapshots.list()
snapshot = self.volume_snapshots.first()
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=None).AndReturn(([], False))
api.glance.snapshot_list_detailed(IsA(http.HttpRequest),
marker=None).AndReturn(([], False))
api.cinder.volume_snapshot_list(IsA(http.HttpRequest)). \
AndReturn(vol_snapshots)
api.cinder.volume_snapshot_delete(IsA(http.HttpRequest), snapshot.id)
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=None).AndReturn(([], False))
api.glance.snapshot_list_detailed(IsA(http.HttpRequest),
marker=None).AndReturn(([], False))
api.cinder.volume_snapshot_list(IsA(http.HttpRequest)). \
AndReturn([])
self.mox.ReplayAll()
formData = {'action':
'volume_snapshots__delete__%s' % snapshot.id}
res = self.client.post(INDEX_URL, formData, follow=True)
self.assertIn("Scheduled deletion of Volume Snapshot: test snapshot",
[m.message for m in res.context['messages']])
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-08-30 23:11
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('flexible_reports', '0005_column_attrs'),
]
operations = [
migrations.CreateModel(
name='ColumnOrder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('position', models.PositiveIntegerField(default=0, verbose_name='Position')),
('column', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='flexible_reports.Column', verbose_name='Column')),
('table', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='flexible_reports.Table', verbose_name='Table')),
],
options={
'ordering': ('position',),
'abstract': False,
},
),
]
|
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import array
from PIL import Image, ImageChops, ImageFilter
texture = [9.962654113769531 for i in range(255)]
trans_4_firefox = [6.102943420410156, 1.7255783081054688, 4.796600341796875, 3.0786514282226562, 3.874969482421875, 4.560279846191406, 2.9211044311523438, 5.743980407714844, 1.8596649169921875, 6.618309020996094, 2.242279052734375, 5.907630920410156, 3.5291671752929688, 5.142974853515625, 4.7641754150390625, 4.239845275878906, 6.006622314453125, 3.5566329956054688, 7.046318054199219, 2.458953857421875, 6.98089599609375, 4.097747802734375, 6.334495544433594, 5.1372528076171875, 5.4424285888671875, 6.276130676269531, 4.924774169921875, 7.284736633300781, 4.731178283691406, 8.024406433105469, 5.0567626953125, 7.3467254638671875, 5.761528015136719, 6.664085388183594, 6.6364288330078125, 6.169319152832031, 7.5374603271484375, 5.8837890625, 8.366203308105469, 5.925559997558594, 8.184623718261719, 6.5082550048828125, 7.806587219238281, 7.1502685546875, 7.318305969238281, 7.938194274902344, 7.0194244384765625, 8.648109436035156, 6.986236572265625, 9.1217041015625, 7.337760925292969, 8.676719665527344, 7.705116271972656, 8.269691467285156, 8.327674865722656, 7.944297790527344, 9.036827087402344, 7.929039001464844, 9.600830078125, 7.801055908203125, 9.482955932617188, 8.40911865234375, 9.093666076660156, 8.778762817382812, 8.809852600097656, 9.267807006835938, 8.715629577636719, 9.830284118652344, 8.734321594238281, 10.346603393554688, 8.881950378417969, 9.682273864746094, 9.181976318359375, 9.375572204589844, 9.366416931152344, 9.228324890136719, 9.894943237304688, 9.209823608398438, 10.355567932128906, 9.233474731445312, 10.138511657714844, 9.40399169921875, 9.819793701171875, 9.748458862304688, 9.659004211425781, 9.977340698242188, 9.584236145019531, 10.347175598144531, 9.538841247558594, 10.693359375, 9.695243835449219, 10.190773010253906, 9.833717346191406, 9.986114501953125, 10.077476501464844, 9.785270690917969, 10.385704040527344, 9.865760803222656, 10.672569274902344, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 
6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875, 6.938934326171875]
trans_chrome = [0.1505533903837204, 0.3038194477558136, 0.4292806088924408, 0.5622016191482544, 0.7073296308517456, 0.8633083701133728, 1.03759765625, 1.1433919668197632, 1.2444390058517456, 1.3753255605697632, 1.4533149003982544, 1.6452364921569824, 1.7923990488052368, 1.8771700859069824, 2.013482093811035, 2.159288167953491, 2.287461996078491, 2.392578125, 2.5105793476104736, 2.570258140563965, 2.726915121078491, 2.812364339828491, 2.9025607109069824, 3.009711265563965, 3.125, 3.1887478828430176, 3.2735188007354736, 3.3969454765319824, 3.533935546875, 3.5542805194854736, 3.6960177421569824, 3.7984211444854736, 3.8492839336395264, 3.986952066421509, 4.088677406311035, 4.213460445404053, 4.241265296936035, 4.336208820343018, 4.413519859313965, 4.469808101654053, 4.5654296875, 4.656304359436035, 4.73768424987793, 4.888237953186035, 4.916042804718018, 5.03675651550293, 5.07337760925293, 5.157470703125, 5.147976398468018, 5.245632648468018, 5.339898109436035, 5.431450843811035, 5.409749507904053, 5.537923336029053, 5.572509765625, 5.61387825012207, 5.733235836029053, 5.796305179595947, 5.88243293762207, 5.85801887512207, 6.005181312561035, 6.057400226593018, 5.99839973449707, 6.104193687438965, 6.157769203186035, 6.179470539093018, 6.235758304595947, 6.29136848449707, 6.300862789154053, 6.40123176574707, 6.37071418762207, 6.380886554718018, 6.47718620300293, 6.510416507720947, 6.503634929656982, 6.561279296875, 6.533474445343018, 6.597222328186035, 6.640625, 6.634521484375, 6.698947429656982, 6.673177242279053, 6.708441734313965, 6.789822101593018, 6.78846549987793, 6.7626953125, 6.746419429779053, 6.78575325012207, 6.787787437438965, 6.852213382720947, 6.868489742279053, 6.847466468811035, 6.8603515625, 6.869167804718018, 6.917995929718018, 6.89222526550293, 6.949191570281982, 6.940375328063965, 6.900363445281982, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 
6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875, 6.982421875]
point_no_am = [0.5908966064453125, 0.8367538452148438, 1.2102127075195312, 1.4507293701171875, 1.6393661499023438, 2.04010009765625, 2.2119522094726562, 2.3973464965820312, 2.5667190551757812, 2.7154922485351562, 2.9256820678710938, 3.0750274658203125, 3.0792236328125, 3.16925048828125, 3.1219482421875, 3.25164794921875, 3.2718658447265625, 3.2453536987304688, 3.29742431640625, 3.2459259033203125, 3.2476425170898438, 3.2388687133789062, 3.2444000244140625, 3.2262802124023438, 3.115081787109375, 3.1332015991210938, 3.08074951171875, 3.081512451171875, 3.0546188354492188, 2.9649734497070312, 2.8959274291992188, 2.8850555419921875, 2.8612136840820312, 2.8308868408203125, 2.7406692504882812, 2.7353286743164062, 2.7063369750976562, 2.6620864868164062, 2.6454925537109375, 2.5934219360351562, 2.6264190673828125, 2.55126953125, 2.5636672973632812, 2.48565673828125, 2.4648666381835938, 2.4332046508789062, 2.3769378662109375, 2.388763427734375, 2.3342132568359375, 2.3012161254882812, 2.30255126953125, 2.2235870361328125, 2.24456787109375, 2.204132080078125, 2.1825790405273438, 2.1604537963867188, 2.1261215209960938, 2.11639404296875, 2.0816802978515625, 2.0488739013671875, 2.0414352416992188, 2.0235061645507812, 2.0120620727539062, 1.9744873046875, 1.9458770751953125, 1.9372940063476562, 1.9132614135742188, 1.9195556640625, 1.874542236328125, 1.8461227416992188, 1.8522262573242188, 1.83258056640625, 1.8312454223632812, 1.8075942993164062, 1.7749786376953125, 1.7719268798828125, 1.7267227172851562, 1.7461776733398438, 1.6881942749023438, 1.68304443359375, 1.6956329345703125, 1.6672134399414062, 1.64794921875, 1.6160964965820312, 1.605224609375, 1.6004562377929688, 1.5886306762695312, 1.56707763671875, 1.5634536743164062, 1.5468597412109375, 1.5506744384765625, 1.5272140502929688, 1.5117645263671875, 1.5293121337890625, 1.494598388671875, 1.49078369140625, 1.4926910400390625, 1.4614105224609375, 1.462554931640625, 1.4314651489257812, 1.4513015747070312, 1.428985595703125, 1.403045654296875, 1.3736724853515625, 1.3568878173828125, 1.3727188110351562, 1.3500213623046875, 1.3387680053710938, 1.3347625732421875, 1.3216018676757812, 1.3347625732421875, 1.3240814208984375, 1.3195037841796875, 1.291656494140625, 1.2819290161132812, 1.2844085693359375, 1.2544631958007812, 1.2639999389648438, 1.2424468994140625, 1.2111663818359375, 1.2311935424804688, 1.2228012084960938, 1.2208938598632812, 1.1926651000976562, 1.1816024780273438, 1.1854171752929688, 1.1804580688476562, 1.1610031127929688, 1.1516571044921875, 1.1508941650390625, 1.1577606201171875, 1.1419296264648438, 1.1281967163085938, 1.1211395263671875, 1.1180877685546875, 1.1114120483398438, 1.1016845703125, 1.1020660400390625, 1.092529296875, 1.0747909545898438, 1.0908126831054688, 1.0763168334960938, 1.0610580444335938, 1.0707855224609375, 1.0585784912109375, 1.0608673095703125, 1.0457992553710938, 1.0517120361328125, 1.0293960571289062, 1.0282516479492188, 1.0417938232421875, 1.0204315185546875, 1.0194778442382812, 1.0107040405273438, 1.001739501953125, 1.0038375854492188, 0.9998321533203125, 0.9870529174804688, 0.983428955078125, 0.9830474853515625, 0.9672164916992188, 0.9916305541992188, 0.9702682495117188, 0.9744644165039062, 0.960540771484375, 0.960540771484375, 0.96435546875, 0.9426116943359375, 0.9429931640625, 0.9344100952148438, 0.9389877319335938, 0.9477615356445312, 0.922393798828125, 0.9080886840820312, 0.9115219116210938, 0.9124755859375, 0.9080886840820312, 0.8924484252929688, 0.8829116821289062, 0.872039794921875, 0.8831024169921875, 
0.8726119995117188, 0.860595703125, 0.8665084838867188, 0.847625732421875, 0.855255126953125, 0.8615493774414062, 0.8327484130859375, 0.8453369140625, 0.8472442626953125, 0.8329391479492188, 0.8457183837890625, 0.81787109375, 0.8230209350585938, 0.8167266845703125, 0.8131027221679688, 0.8060455322265625, 0.8100509643554688, 0.8100509643554688, 0.7999420166015625, 0.791168212890625, 0.80108642578125, 0.7984161376953125, 0.7928848266601562, 0.7783889770507812, 0.7844924926757812, 0.77972412109375, 0.7703781127929688, 0.7781982421875, 0.7600784301757812, 0.7520675659179688, 0.7688522338867188, 0.7631301879882812, 0.772857666015625, 0.7593154907226562, 0.7570266723632812, 0.753021240234375, 0.7413864135742188, 0.7406234741210938, 0.7356643676757812, 0.740814208984375, 0.7516860961914062, 0.74310302734375, 0.7463455200195312, 0.7242202758789062, 0.7219314575195312, 0.7328033447265625, 0.7198333740234375, 0.7232666015625, 0.7091522216796875, 0.7144927978515625, 0.7198333740234375, 0.7261276245117188, 0.7251739501953125, 0.7167816162109375, 0.7055282592773438, 0.71868896484375, 0.7062911987304688, 0.7076263427734375, 0.70037841796875, 0.6908416748046875, 0.705718994140625, 0.6952285766601562, 0.6978988647460938, 0.67901611328125, 0.6866455078125, 0.6902694702148438, 0.6769180297851562, 0.68817138671875, 0.6807327270507812, 0.6799697875976562, 0.6830215454101562, 0.6700515747070312, 0.6755828857421875, 0.6647109985351562]
light_no_am = [1.3299942016601562, 2.1608352661132812, 3.0452728271484375, 3.7454605102539062, 4.429817199707031, 5.1483154296875, 5.624961853027344, 6.012535095214844, 6.340980529785156, 6.534004211425781, 6.88018798828125, 7.1475982666015625, 6.927490234375, 7.0438385009765625, 6.829833984375, 6.749534606933594, 6.757926940917969, 6.444549560546875, 6.353187561035156, 6.05316162109375, 5.931663513183594, 5.7506561279296875, 5.651664733886719, 5.492401123046875, 5.1761627197265625, 5.051612854003906, 4.848480224609375, 4.7718048095703125, 4.650688171386719, 4.314994812011719, 4.218864440917969, 4.1259765625, 3.9945602416992188, 3.8995742797851562, 3.7109375, 3.6218643188476562, 3.5205841064453125, 3.4856796264648438, 3.3670425415039062, 3.2506942749023438, 3.217315673828125, 3.0805587768554688, 3.0981063842773438, 2.9714584350585938, 2.8413772583007812, 2.8219223022460938, 2.7257919311523438, 2.7330398559570312, 2.6468276977539062, 2.5213241577148438, 2.519989013671875, 2.4190902709960938, 2.4028778076171875, 2.3548126220703125, 2.2424697875976562, 2.240753173828125, 2.1810531616210938, 2.1890640258789062, 2.1373748779296875, 2.06756591796875, 2.0542144775390625, 2.0059585571289062, 1.9643783569335938, 1.9428253173828125, 1.8705368041992188, 1.8665313720703125, 1.83868408203125, 1.8590927124023438, 1.78985595703125, 1.7719268798828125, 1.7444610595703125, 1.7091751098632812, 1.708221435546875, 1.6727447509765625, 1.6374588012695312, 1.6355514526367188, 1.5958786010742188, 1.5756607055664062, 1.5544891357421875, 1.5283584594726562, 1.5386581420898438, 1.5268325805664062, 1.4759063720703125, 1.4329910278320312, 1.4108657836914062, 1.45111083984375, 1.4102935791015625, 1.4028549194335938, 1.3940811157226562, 1.3538360595703125, 1.3837814331054688, 1.3746261596679688, 1.3158798217773438, 1.3202667236328125, 1.28326416015625, 1.290130615234375, 1.28631591796875, 1.2651443481445312, 1.2516021728515625, 1.2353897094726562, 1.2378692626953125, 1.2271881103515625, 1.197052001953125, 1.1762619018554688, 1.1598587036132812, 1.1640548706054688, 1.1526107788085938, 1.1487960815429688, 1.1312484741210938, 1.1125564575195312, 1.1173248291015625, 1.1152267456054688, 1.094818115234375, 1.0644912719726562, 1.0639190673828125, 1.0715484619140625, 1.0446548461914062, 1.056671142578125, 1.0301589965820312, 1.0259628295898438, 1.0183334350585938, 1.0099411010742188, 1.0019302368164062, 0.9805679321289062, 0.9881973266601562, 0.9866714477539062, 0.9746551513671875, 0.9761810302734375, 0.933837890625, 0.9428024291992188, 0.9593963623046875, 0.9456634521484375, 0.9197235107421875, 0.9225845336914062, 0.8977890014648438, 0.9107589721679688, 0.9130477905273438, 0.8899688720703125, 0.8871078491210938, 0.8792877197265625, 0.8739471435546875, 0.8880615234375, 0.8548736572265625, 0.85296630859375, 0.8584976196289062, 0.8504867553710938, 0.8373260498046875, 0.8363723754882812, 0.8235931396484375, 0.8148193359375, 0.8268356323242188, 0.821685791015625, 0.8028030395507812, 0.8012771606445312, 0.7982254028320312, 0.8075714111328125, 0.811004638671875, 0.7892608642578125, 0.7761001586914062, 0.7762908935546875, 0.7776260375976562, 0.775909423828125, 0.7658004760742188, 0.7762908935546875, 0.75836181640625, 0.7703781127929688, 0.7465362548828125, 0.7532119750976562, 0.7448196411132812, 0.7333755493164062, 0.7371902465820312, 0.7463455200195312, 0.7154464721679688, 0.7221221923828125, 0.7116317749023438, 0.7171630859375, 0.7167816162109375, 0.701904296875, 0.7099151611328125, 0.6990432739257812, 0.69580078125, 
0.7028579711914062, 0.6755828857421875, 0.6900787353515625, 0.6734848022460938, 0.679779052734375, 0.6717681884765625, 0.6689071655273438, 0.6641387939453125, 0.6595611572265625, 0.6662368774414062, 0.6593704223632812, 0.6589889526367188, 0.655364990234375, 0.6471633911132812, 0.64849853515625, 0.6502151489257812, 0.6353378295898438, 0.6391525268554688, 0.6345748901367188, 0.6404876708984375, 0.6334304809570312, 0.6330490112304688, 0.632476806640625, 0.6288528442382812, 0.6177902221679688, 0.6212234497070312, 0.6181716918945312, 0.615692138671875, 0.6072998046875, 0.6036758422851562, 0.6103515625, 0.5947113037109375, 0.60272216796875, 0.6097793579101562, 0.5970001220703125, 0.5939483642578125, 0.5916595458984375, 0.58441162109375, 0.5823135375976562, 0.5872726440429688, 0.5987167358398438, 0.5710601806640625, 0.5777359008789062, 0.5617141723632812, 0.5718231201171875, 0.5664825439453125, 0.553131103515625, 0.5580902099609375, 0.5474090576171875, 0.5598068237304688, 0.5542755126953125, 0.548553466796875, 0.5529403686523438, 0.5498886108398438, 0.540924072265625, 0.540924072265625, 0.537109375, 0.5418777465820312, 0.5338668823242188, 0.5437850952148438, 0.5300521850585938, 0.5235671997070312, 0.5392074584960938, 0.5304336547851562, 0.5353927612304688, 0.5329132080078125, 0.5365371704101562, 0.5191802978515625, 0.5205154418945312, 0.5281448364257812, 0.5218505859375, 0.5247116088867188, 0.518035888671875, 0.51422119140625]
point_light = [4.663276672363281, 4.793548583984375, 4.8496246337890625, 4.894256591796875, 4.959297180175781, 5.010414123535156, 5.062294006347656, 5.074501037597656, 5.0994873046875, 5.123329162597656, 5.153465270996094, 5.157279968261719, 5.16510009765625, 5.18951416015625, 5.176544189453125, 5.1883697509765625, 5.193901062011719, 5.213356018066406, 5.221366882324219, 5.224800109863281, 5.253410339355469, 5.233192443847656, 5.238533020019531, 5.2581787109375, 5.2577972412109375, 5.232429504394531, 5.228424072265625, 5.249786376953125, 5.245208740234375, 5.2402496337890625, 5.2371978759765625, 5.230140686035156, 5.23681640625, 5.227088928222656, 5.2135467529296875, 5.22918701171875, 5.214500427246094, 5.2272796630859375, 5.216407775878906, 5.192375183105469, 5.203819274902344, 5.2059173583984375, 5.192756652832031, 5.1906585693359375, 5.181312561035156, 5.183601379394531, 5.1891326904296875, 5.1727294921875, 5.191612243652344, 5.191612243652344, 5.18035888671875, 5.187416076660156, 5.162620544433594, 5.17730712890625, 5.1544189453125, 5.146217346191406, 5.139923095703125, 5.14984130859375, 5.136871337890625, 5.129241943359375, 5.1395416259765625, 5.140495300292969, 5.1380157470703125, 5.1296234130859375, 5.129051208496094, 5.1197052001953125, 5.115699768066406, 5.110740661621094, 5.1055908203125, 5.103492736816406, 5.101966857910156, 5.078887939453125, 5.0891876220703125, 5.0922393798828125, 5.106353759765625, 5.08575439453125, 5.081748962402344, 5.0785064697265625, 5.090141296386719, 5.0930023193359375, 5.075263977050781, 5.095672607421875, 5.0952911376953125, 5.087471008300781, 5.082130432128906, 5.074310302734375, 5.0624847412109375, 5.051422119140625, 5.050086975097656, 5.05218505859375, 5.064201354980469, 5.056571960449219, 5.069541931152344, 5.054664611816406, 5.047035217285156, 5.050086975097656, 5.049324035644531, 5.0502777099609375, 5.0350189208984375, 5.0262451171875, 5.023956298828125, 5.023765563964844, 5.020332336425781, 5.023765563964844, 5.024147033691406, 5.014228820800781, 5.013465881347656, 5.0258636474609375, 5.033683776855469, 5.0205230712890625, 5.023384094238281, 5.0251007080078125, 5.012321472167969, 5.0273895263671875, 5.016899108886719, 5.0380706787109375, 5.020904541015625, 5.016326904296875, 4.991912841796875, 4.9961090087890625, 4.990959167480469, 5.007171630859375, 5.0006866455078125, 5.005073547363281, 4.9976348876953125, 4.993438720703125, 5.002021789550781, 4.996490478515625, 4.997825622558594, 4.985237121582031, 4.982757568359375, 4.984474182128906, 5.000495910644531, 4.9823760986328125, 4.9747467041015625, 4.96978759765625, 4.965972900390625, 4.966926574707031, 4.9633026123046875, 4.9633026123046875, 4.947853088378906, 4.961585998535156, 4.951286315917969, 4.943084716796875, 4.955863952636719, 4.9468994140625, 4.955482482910156, 4.95758056640625, 4.9610137939453125, 4.967308044433594, 4.9808502197265625, 4.976844787597656, 4.970550537109375, 4.9701690673828125, 4.969024658203125, 4.9640655517578125, 4.959869384765625, 4.947662353515625, 4.949378967285156, 4.95147705078125, 4.973793029785156, 4.969596862792969, 4.9652099609375, 4.9625396728515625, 4.952812194824219, 4.9510955810546875, 4.950714111328125, 4.932403564453125, 4.93621826171875, 4.930686950683594, 4.94232177734375, 4.942131042480469, 4.949951171875, 4.950904846191406, 4.955291748046875, 4.946327209472656, 4.95147705078125, 4.948997497558594, 4.952430725097656, 4.941558837890625, 4.944419860839844, 4.947471618652344, 4.947471618652344, 4.941749572753906, 4.939842224121094, 4.9343109130859375, 
4.936408996582031, 4.934120178222656, 4.936027526855469, 4.9259185791015625, 4.915618896484375, 4.9251556396484375, 4.929161071777344, 4.9289703369140625, 4.92706298828125, 4.93316650390625, 4.926490783691406, 4.9259185791015625, 4.932594299316406, 4.920196533203125, 4.923439025878906, 4.9198150634765625, 4.9167633056640625, 4.9152374267578125, 4.905891418457031, 4.91180419921875, 4.9114227294921875, 4.92095947265625, 4.9144744873046875, 4.90875244140625, 4.904747009277344, 4.897880554199219, 4.8885345458984375, 4.9053192138671875, 4.899024963378906, 4.899406433105469, 4.898643493652344, 4.897880554199219, 4.9030303955078125, 4.8923492431640625, 4.898262023925781, 4.895210266113281, 4.897117614746094, 4.9030303955078125, 4.891204833984375, 4.90264892578125, 4.8954010009765625, 4.8801422119140625, 4.877471923828125, 4.892921447753906, 4.876518249511719, 4.8786163330078125, 4.890251159667969, 4.889106750488281, 4.8854827880859375, 4.892158508300781, 4.881095886230469, 4.872894287109375, 4.874992370605469, 4.852485656738281, 4.876518249511719, 4.87213134765625, 4.862213134765625, 4.86297607421875, 4.8675537109375, 4.8614501953125, 4.8675537109375, 4.87213134765625, 4.866790771484375, 4.8648834228515625, 4.858207702636719, 4.8587799072265625, 4.85687255859375, 4.852867126464844, 4.856300354003906]
simple_light = [4.739189147949219, 4.979515075683594, 5.322456359863281, 5.526542663574219, 5.747795104980469, 6.0222625732421875, 6.1580657958984375, 6.323814392089844, 6.469917297363281, 6.591606140136719, 6.836509704589844, 6.9774627685546875, 6.992912292480469, 7.155609130859375, 7.087898254394531, 7.210350036621094, 7.2998046875, 7.248497009277344, 7.3192596435546875, 7.229423522949219, 7.2948455810546875, 7.28912353515625, 7.344818115234375, 7.33184814453125, 7.1685791015625, 7.1319580078125, 7.11212158203125, 7.138252258300781, 7.1414947509765625, 6.975555419921875, 6.949424743652344, 6.9286346435546875, 6.896781921386719, 6.9091796875, 6.7584991455078125, 6.7508697509765625, 6.67572021484375, 6.677436828613281, 6.642723083496094, 6.5227508544921875, 6.6043853759765625, 6.50177001953125, 6.550025939941406, 6.395912170410156, 6.295967102050781, 6.3747406005859375, 6.292724609375, 6.339263916015625, 6.179046630859375, 6.122779846191406, 6.1351776123046875, 6.051445007324219, 6.090354919433594, 5.985069274902344, 5.882835388183594, 5.916404724121094, 5.8376312255859375, 5.8460235595703125, 5.788230895996094, 5.731010437011719, 5.726051330566406, 5.666160583496094, 5.699920654296875, 5.64117431640625, 5.554389953613281, 5.548286437988281, 5.513954162597656, 5.559349060058594, 5.477333068847656, 5.423545837402344, 5.4180145263671875, 5.3905487060546875, 5.359840393066406, 5.337715148925781, 5.310249328613281, 5.275917053222656, 5.243110656738281, 5.251884460449219, 5.200004577636719, 5.1654815673828125, 5.214881896972656, 5.156898498535156, 5.0640106201171875, 5.049324035644531, 5.029487609863281, 5.096435546875, 5.040931701660156, 5.011177062988281, 4.9488067626953125, 4.915046691894531, 4.929161071777344, 4.913902282714844, 4.887962341308594, 4.852485656738281, 4.7962188720703125, 4.8244476318359375, 4.815483093261719, 4.780387878417969, 4.736137390136719, 4.71343994140625, 4.739952087402344, 4.732322692871094, 4.679107666015625, 4.647636413574219, 4.6112060546875, 4.621696472167969, 4.643440246582031, 4.5886993408203125, 4.572486877441406, 4.5360565185546875, 4.553794860839844, 4.5238494873046875, 4.5223236083984375, 4.457283020019531, 4.488945007324219, 4.487037658691406, 4.4124603271484375, 4.4284820556640625, 4.4097900390625, 4.370307922363281, 4.4162750244140625, 4.37774658203125, 4.349708557128906, 4.307365417480469, 4.2999267578125, 4.2911529541015625, 4.2888641357421875, 4.27398681640625, 4.250335693359375, 4.213523864746094, 4.244422912597656, 4.203605651855469, 4.2057037353515625, 4.1717529296875, 4.181861877441406, 4.171943664550781, 4.171180725097656, 4.114532470703125, 4.106903076171875, 4.0996551513671875, 4.128265380859375, 4.099464416503906, 4.078102111816406, 4.073524475097656, 4.0618896484375, 4.084014892578125, 4.065704345703125, 4.0401458740234375, 4.0203094482421875, 4.0157318115234375, 4.013252258300781, 3.9791107177734375, 3.9913177490234375, 3.9621353149414062, 3.9506912231445312, 3.9793014526367188, 3.9842605590820312, 3.9279937744140625, 3.9289474487304688, 3.9043426513671875, 3.9020538330078125, 3.9079666137695312, 3.8724899291992188, 3.8715362548828125, 3.8358688354492188, 3.8524627685546875, 3.858184814453125, 3.8438796997070312, 3.8106918334960938, 3.7843704223632812, 3.780364990234375, 3.7954330444335938, 3.7677764892578125, 3.7698745727539062, 3.7506103515625, 3.7500381469726562, 3.73687744140625, 3.7088394165039062, 3.7092208862304688, 3.7214279174804688, 3.69110107421875, 3.7014007568359375, 3.6815643310546875, 3.6708831787109375, 3.6371231079101562, 
3.641510009765625, 3.6806106567382812, 3.6285400390625, 3.6464691162109375, 3.6357879638671875, 3.6291122436523438, 3.6439895629882812, 3.6153793334960938, 3.638458251953125, 3.6085128784179688, 3.5890579223632812, 3.6182403564453125, 3.5861968994140625, 3.5913467407226562, 3.5800933837890625, 3.5749435424804688, 3.5814285278320312, 3.5573959350585938, 3.5406112670898438, 3.52783203125, 3.534698486328125, 3.5516738891601562, 3.5253524780273438, 3.5173416137695312, 3.5182952880859375, 3.4969329833984375, 3.5257339477539062, 3.4830093383789062, 3.4997940063476562, 3.4872055053710938, 3.4572601318359375, 3.4837722778320312, 3.4540176391601562, 3.4612655639648438, 3.4505844116210938, 3.4433364868164062, 3.467559814453125, 3.4259796142578125, 3.4450531005859375, 3.4343719482421875, 3.4391403198242188, 3.4284591674804688, 3.4252166748046875, 3.4204483032226562, 3.3899307250976562, 3.3924102783203125, 3.4008026123046875, 3.4015655517578125, 3.3740997314453125, 3.3700942993164062, 3.3721923828125, 3.3647537231445312, 3.3533096313476562, 3.3639907836914062, 3.3227920532226562, 3.3252716064453125, 3.3306121826171875, 3.3121109008789062, 3.3214569091796875, 3.3109664916992188, 3.2955169677734375, 3.3258438110351562, 3.2972335815429688, 3.3048629760742188, 3.2939910888671875, 3.260040283203125, 3.277587890625, 3.2907485961914062, 3.2684326171875, 3.27262870841796875]
two_light_point = [6.545066833496094, 6.570243835449219, 6.584739685058594, 6.582450866699219, 6.574821472167969, 6.589508056640625, 6.59027099609375, 6.5921783447265625, 6.587028503417969, 6.580924987792969, 6.5654754638671875, 6.572914123535156, 6.576347351074219, 6.571388244628906, 6.5738677978515625, 6.560707092285156, 6.5486907958984375, 6.53076171875, 6.526947021484375, 6.5303802490234375, 6.5242767333984375, 6.513214111328125, 6.518745422363281, 6.5135955810546875, 6.4971923828125, 6.487846374511719, 6.484794616699219, 6.468772888183594, 6.4678192138671875, 6.4502716064453125, 6.444740295410156, 6.4479827880859375, 6.435203552246094, 6.441497802734375, 6.441307067871094, 6.432342529296875, 6.426429748535156, 6.4174652099609375, 6.4098358154296875, 6.410980224609375, 6.412506103515625, 6.4182281494140625, 6.408882141113281, 6.410789489746094, 6.403923034667969, 6.4037322998046875, 6.403541564941406, 6.402587890625, 6.3899993896484375, 6.3785552978515625, 6.376838684082031, 6.3884735107421875, 6.388282775878906, 6.3678741455078125, 6.3610076904296875, 6.38275146484375, 6.3602447509765625, 6.3564300537109375, 6.356620788574219, 6.338691711425781, 6.360626220703125, 6.344795227050781, 6.3358306884765625, 6.333351135253906, 6.317329406738281, 6.321144104003906, 6.313323974609375, 6.325721740722656, 6.331825256347656, 6.3106536865234375, 6.308746337890625, 6.305122375488281, 6.291770935058594, 6.279945373535156, 6.279754638671875, 6.2744140625, 6.2786102294921875, 6.282234191894531, 6.280708312988281, 6.2541961669921875, 6.2633514404296875, 6.277275085449219, 6.2602996826171875, 6.258583068847656, 6.256294250488281, 6.2541961669921875, 6.23779296875, 6.249427795410156, 6.2541961669921875, 6.25762939453125, 6.248664855957031, 6.238746643066406, 6.241035461425781, 6.239509582519531, 6.2313079833984375, 6.241607666015625, 6.243896484375, 6.229591369628906, 6.215476989746094, 6.2183380126953125, 6.231880187988281, 6.2297821044921875, 6.22711181640625, 6.228446960449219, 6.2252044677734375, 6.213951110839844, 6.2183380126953125, 6.221961975097656, 6.203269958496094, 6.205940246582031, 6.200408935546875, 6.2023162841796875, 6.198883056640625, 6.2023162841796875, 6.191444396972656, 6.1946868896484375, 6.19659423828125, 6.1954498291015625, 6.192970275878906, 6.19049072265625, 6.187248229980469, 6.186676025390625, 6.181526184082031, 6.189537048339844, 6.186676025390625, 6.184959411621094, 6.193733215332031, 6.192588806152344, 6.188392639160156, 6.19049072265625, 6.19354248046875, 6.182861328125, 6.180572509765625, 6.182289123535156, 6.181907653808594, 6.1878204345703125, 6.182861328125, 6.180381774902344, 6.184196472167969, 6.18133544921875, 6.1767578125, 6.1824798583984375, 6.189727783203125, 6.176567077636719, 6.174659729003906, 6.173896789550781, 6.170654296875, 6.1656951904296875, 6.15234375, 6.156158447265625, 6.159400939941406, 6.171226501464844, 6.163215637207031, 6.1611175537109375, 6.1542510986328125, 6.1435699462890625, 6.147575378417969, 6.1389923095703125, 6.145477294921875, 6.137275695800781, 6.138038635253906, 6.139945983886719, 6.125640869140625, 6.1389923095703125, 6.138038635253906, 6.136894226074219, 6.1252593994140625, 6.128883361816406, 6.126594543457031, 6.123161315917969, 6.1191558837890625, 6.118583679199219, 6.1191558837890625, 6.114387512207031, 6.113433837890625, 6.110954284667969, 6.110191345214844, 6.110191345214844, 6.109619140625, 6.104469299316406, 6.097412109375, 6.1000823974609375, 6.1031341552734375, 6.1069488525390625, 6.097412109375, 6.0985565185546875, 
6.095314025878906, 6.1046600341796875, 6.096458435058594, 6.09283447265625, 6.097221374511719, 6.0924530029296875, 6.093025207519531, 6.08978271484375, 6.090545654296875, 6.0970306396484375, 6.0901641845703125, 6.096458435058594, 6.087684631347656, 6.0817718505859375, 6.092643737792969, 6.0886383056640625, 6.083869934082031, 6.085968017578125, 6.08978271484375, 6.0947418212890625, 6.092643737792969, 6.0894012451171875, 6.090545654296875, 6.08978271484375, 6.0916900634765625, 6.0791015625, 6.078910827636719, 6.078910827636719, 6.075477600097656, 6.071662902832031, 6.0771942138671875, 6.079864501953125, 6.072044372558594, 6.076240539550781, 6.064414978027344, 6.0626983642578125, 6.061363220214844, 6.059455871582031, 6.061744689941406, 6.058502197265625, 6.060791015625, 6.05010986328125, 6.062507629394531, 6.0581207275390625, 6.061553955078125, 6.062889099121094, 6.069183349609375, 6.06842041015625, 6.06689453125, 6.065940856933594, 6.0626983642578125, 6.0535430908203125, 6.059837341308594, 6.0558319091796875, 6.0680389404296875, 6.060600280761719, 6.061744689941406, 6.049346923828125, 6.049346923828125, 6.048393249511719, 6.048393249511719, 6.048774719238281, 6.0420989990234375, 6.036949157714844, 6.037712097167969, 6.050872802734375, 6.0474395751953125, 6.0459136962890625, 6.047248840332031]
root = '/home/site/data/images/gradient/'
global num
num = 0
def getDifference(img1, img2):
sub = ImageChops.subtract(img1,img2, 0.005)
subt = ImageChops.subtract(img2, img1, 0.005)
return ImageChops.add(sub, subt).convert('RGB')
def getDiff(img1, img2, pic_id):
global num
res = 0
img1 = img1.convert('RGB')
img2 = img2.convert('RGB')
    diff = getDifference(img1, img2)
if not os.path.exists(root + "diff/" + str(pic_id)):
os.makedirs(root + "diff/" + str(pic_id))
diff.save(root + "diff/" + str(pic_id) + '/' + str(num) + '.png')
num += 1
pixels = diff.getdata()
for pixel in pixels:
if pixel != (0,0,0):
res += 1
return res
dir_path = root + "origin/"
dirs = os.listdir(dir_path)
res = array.array('f')
#for i in range(1,1):
for i in range(1,256):
    total = 0
num = 0
    print(i)
res.append(0)
# res[i - 1] = (trans2[i - 1] + trans3[i - 1] + alpha[i - 1]) / 3
# continue
for dir_name1 in dirs:
for dir_name2 in dirs:
img1 = Image.open(dir_path + dir_name1 + '/' + str(i) + '.png')
img2 = Image.open(dir_path + dir_name2 + '/' + str(i) + '.png')
dif = getDiff(img1, img2, i)
res[i - 1] += dif
total += 256 * 256
res[i - 1] = res[i - 1] * 100 / total
# res[i - 1] = (res[i - 1] + alpha[i - 1]) / 2
plt.plot(range(255), simple_light, label = 'diffuse')
plt.plot(range(255), texture, label = 'texture')
plt.plot(range(255), two_light_point, label = 'diffuse and specular')
plt.plot(range(255), point_light, label = 'specular')
plt.plot(range(255), light_no_am, label = 'diffuse no ambient')
plt.plot(range(255), point_no_am, label = 'specular no ambient')
plt.plot(range(255), trans_chrome, label = 'trans_chrome')
plt.plot(range(255), trans_4_firefox, label = 'trans_firefox')
plt.plot(range(255), res, label = 'res')
plt.legend(bbox_to_anchor=(1, 1), loc=1, borderaxespad=0., fontsize = 10)
plt.savefig(root + 'res.png')
print(res)
|
#Numpy Exercises - 01
#*******************
import numpy as np
|
v = float(input('What is the amount in meters? '))
cm = v * 100
mm = v * 1000
print('The amount in centimeters is {} and in millimeters is {}.'.format(cm, mm))
|
import string
from math import inf
from tempfile import TemporaryDirectory
from os.path import join
BLOCK_FILE_NAME = 'block_{}.dat'
class ExternalSort:
"""External sort of a file with integer numbers (separated by spaces).
"""
def __init__(self, input_file_name, block_size, output_file_name):
if block_size < 2:
raise ValueError('Block size should be integer greater than one.')
self.input_file_name = input_file_name
self.block_size = block_size
self.output_file_name = output_file_name
self.tempdir = TemporaryDirectory()
def run(self):
"""Run algorithm of external sort.
"""
first_index, last_index = \
self.get_initial_blocks()
while last_index - first_index > self.block_size:
first_index, last_index = \
self.join_blocks(first_index, last_index)
self.merge_blocks(self.output_file_name, first_index, last_index)
def get_initial_blocks(self):
"""Create files with sorted blocks from fname.
Return indeces of saved blocks.
"""
block = []
index = 0
for number in self.numbers_from_file(self.input_file_name):
block.append(number)
if len(block) == self.block_size:
block.sort()
self.write_block(index, block)
block = []
index += 1
        if block:
            block.sort()
            self.write_block(index, block)
            index += 1
return 0, index
def numbers_from_file(self, fname):
"""Generator for numbers in a file.
"""
with open(fname, 'r') as file:
number = ''
while True:
character = file.read(1)
if character == '':
if number:
yield int(number)
break
elif character in string.digits + '-':
number += character
elif character == ' ' or character == '\n':
if number:
yield int(number)
number = ''
else:
raise ValueError('Not a number in a file.')
def write_block(self, index, block):
"""Write the block to a file with given index.
"""
writer = self.numbers_to_file(
join(self.tempdir.name, BLOCK_FILE_NAME.format(index)))
next(writer)
for number in block:
writer.send(number)
writer.close()
def numbers_to_file(self, fname):
"""Coroutine that saves numbers to a file.
"""
with open(fname, 'w') as output_file:
empty = True
while True:
number = (yield)
if not empty:
output_file.write(' ')
output_file.write(str(number))
empty = False
def join_blocks(self, first_index, last_index):
"""Join blocks into bigger ones and return their indices.
"""
index = last_index
for i in range(first_index, last_index, self.block_size):
if i + self.block_size <= last_index:
self.merge_blocks(
join(self.tempdir.name, BLOCK_FILE_NAME.format(index)),
i, i+self.block_size)
else:
self.merge_blocks(
join(self.tempdir.name, BLOCK_FILE_NAME.format(index)),
i, last_index)
index += 1
return last_index, index
def merge_blocks(self, fname, first_index, last_index):
"""Merge blocks into one and save to a file with given name.
"""
number_generators = [self.numbers_from_file(
join(self.tempdir.name, BLOCK_FILE_NAME.format(index)))
for index in range(first_index, last_index)]
numbers = [next(number_generator)
for number_generator in number_generators]
writer = self.numbers_to_file(fname)
next(writer)
while True:
minvalue, minindex = self.argmin(numbers)
if minindex is None:
break
writer.send(minvalue)
try:
numbers[minindex] = next(number_generators[minindex])
except StopIteration:
numbers[minindex] = inf
writer.close()
def argmin(self, array):
"""Find minimum value and its index.
"""
minvalue = inf
minindex = None
for i in range(len(array)):
if array[i] < minvalue:
minvalue = array[i]
minindex = i
return minvalue, minindex
def external_sort(input_file_name, block_size, output_file_name=None):
"""External sort of a file with integer numbers (separated by spaces).
"""
if output_file_name is None:
output_file_name = input_file_name
sorter = ExternalSort(input_file_name, block_size, output_file_name)
sorter.run()
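# A minimal usage sketch (hypothetical file names): sort space-separated
# integers on disk using blocks of two numbers at a time.
if __name__ == '__main__':
    with open('numbers.txt', 'w') as demo_file:
        demo_file.write('5 3 -1 8 2 7')
    external_sort('numbers.txt', block_size=2, output_file_name='sorted.txt')
    with open('sorted.txt') as result_file:
        print(result_file.read())  # -1 2 3 5 7 8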
|
import unittest
from fsap.sepr import section_property as sp
class SectionPropertyTest(unittest.TestCase):
def setUp(self):
self.points1 = [(0.0,0.0),(1.0,0.0),(1.0,1.0),(0.0,1.0)]
self.points2 = [(0.0,0.0),(1.0,1.0),(1.0,0.0),(0.0,1.0)]
def test_centroid(self):
sp1 = sp.SectionProperty(self.points1)
self.assertAlmostEqual(sp1.centroid.x, 0.5, places=3, msg="Incorrect center x-coord")
self.assertAlmostEqual(sp1.centroid.y, 0.5, places=3, msg="Incorrect center y-coord")
def test_order_points(self):
sp2 = sp.SectionProperty(self.points2)
        self.assertCountEqual(sp2.points[0].cc(),(1.0, 1.0), "Incorrect first point")
        self.assertCountEqual(sp2.points[1].cc(),(1.0, 0.0), "Incorrect second point")
        self.assertCountEqual(sp2.points[2].cc(),(0.0, 0.0), "Incorrect third point")
        self.assertCountEqual(sp2.points[3].cc(),(0.0, 1.0), "Incorrect fourth point")
        self.assertCountEqual(sp2.points[4].cc(),(1.0, 1.0), "Incorrect fifth point")
def test_max_y(self):
sp1 = sp.SectionProperty(self.points1)
        self.assertAlmostEqual(sp1.max_y, 1.0, msg="Incorrect max y")
def test_max_x(self):
sp1 = sp.SectionProperty(self.points1)
        self.assertAlmostEqual(sp1.max_x, 1.0, msg="Incorrect max x " + str(sp1.max_x))
def test_min_y(self):
sp1 = sp.SectionProperty(self.points1)
        self.assertAlmostEqual(sp1.min_y, 0.0, msg="Incorrect min y")
def test_min_x(self):
sp1 = sp.SectionProperty(self.points1)
        self.assertAlmostEqual(sp1.min_x, 0.0, msg="Incorrect min x")
def test_bounding_box(self):
sp1 = sp.SectionProperty(self.points1)
        self.assertCountEqual(sp1.box,(1.0, 1.0, 0.0, 0.0), "Incorrect box")
def test_width(self):
sp1 = sp.SectionProperty(self.points1)
        self.assertAlmostEqual(sp1.width, 1.0, msg="Incorrect width")
def test_height(self):
sp1 = sp.SectionProperty(self.points1)
        self.assertAlmostEqual(sp1.height, 1.0, msg="Incorrect height")
def test_area(self):
sp1 = sp.SectionProperty(self.points1)
self.assertAlmostEqual(sp1.area, 1.0, places=3, msg="Incorrect area " + str(sp1.area))
def test_ena_x(self):
sp1 = sp.SectionProperty(self.points1)
self.assertAlmostEqual(sp1.ena_x, 0.5, places=3, msg="Incorrect ena_x")
def test_ena_y(self):
sp1 = sp.SectionProperty(self.points1)
self.assertAlmostEqual(sp1.ena_y, 0.5, places=3, msg="Incorrect ena_y")
def test_ixx_x(self):
sp1 = sp.SectionProperty(self.points1)
self.assertAlmostEqual(sp1.ixx_x, 0.333, places=3, msg="Incorrect ixx_x " + str(sp1.ixx_x))
def test_iyy_y(self):
sp1 = sp.SectionProperty(self.points1)
self.assertAlmostEqual(sp1.iyy_y, 0.333, places=3, msg="Incorrect iyy_y " + str(sp1.iyy_y))
def test_ixx_c(self):
sp1 = sp.SectionProperty(self.points1)
self.assertAlmostEqual(sp1.ixx_c, 0.083, places=3, msg="Incorrect ixx_c " + str(sp1.ixx_c))
def test_iyy_c(self):
sp1 = sp.SectionProperty(self.points1)
self.assertAlmostEqual(sp1.iyy_c, 0.083, places=3, msg="Incorrect iyy_c " + str(sp1.iyy_c))
def test_ixy_xy(self):
sp1 = sp.SectionProperty(self.points1)
self.assertAlmostEqual(sp1.ixy_xy, 0.25, places=3, msg="Incorrect ixy_xy " +
str(sp1.ixy_xy))
def test_ixy_c(self):
sp1 = sp.SectionProperty(self.points1)
self.assertAlmostEqual(sp1.ixy_c, 0.5, places=3, msg="Incorrect ixy_x " + str(sp1.ixy_c))
def test_sxxt(self):
sp1 = sp.SectionProperty(self.points1)
self.assertAlmostEqual(sp1.sxxt, 0.1666, places=3, msg="Incorrect sxxt "
+ str(sp1.sxxt))
def test_sxxb(self):
sp1 = sp.SectionProperty(self.points1)
self.assertAlmostEqual(sp1.sxxb, 0.1666, places=3, msg="Incorrect sxxb "
+ str(sp1.sxxb))
def test_syyr(self):
sp1 = sp.SectionProperty(self.points1)
self.assertAlmostEqual(sp1.syyr, 0.1666, places=3, msg="Incorrect syyr "
+ str(sp1.syyr))
def test_syyl(self):
sp1 = sp.SectionProperty(self.points1)
self.assertAlmostEqual(sp1.syyl, 0.1666, places=3, msg="Incorrect syyl "
+ str(sp1.syyl))
def test_rx(self):
sp1 = sp.SectionProperty(self.points1)
self.assertAlmostEqual(sp1.rx, 0.2886, places=3, msg="Incorrect rx "
+ str(sp1.rx))
def test_ry(self):
sp1 = sp.SectionProperty(self.points1)
self.assertAlmostEqual(sp1.ry, 0.2886, places=3, msg="Incorrect ry "
+ str(sp1.ry))
def test_rmin(self):
sp1 = sp.SectionProperty(self.points1)
self.assertAlmostEqual(sp1.rmin, 0.2886, places=3, msg="Incorrect rmin "
+ str(sp1.rmin))
|
# -*- coding: utf-8 -*-
"""unittest
"""
import unittest
import numpy as np
from numpy.testing import assert_almost_equal
from fastsst.util.linear_algebra import *
class TestLinearAlgebra(unittest.TestCase):
def setUp(self):
np.random.seed(1234)
self.A = np.random.rand(10,10)
self.x0 = np.random.rand(10)
self.x0 /= np.linalg.norm(self.x0)
d = 3*np.ones(10)
e = -1*np.ones(10 - 1)
self.T = np.diag(d) + np.diag(e,k=1) + np.diag(e,k=-1)
def tearDown(self):
pass
def test_power_method(self):
u_pow,s_pow,_ = power_method(self.A,self.x0,n_iter=100)
U_np,s_np,_ = np.linalg.svd(self.A)
assert_almost_equal(s_pow,s_np[0])
assert_almost_equal(np.abs(u_pow),np.abs(U_np[:,0]))
def test_eig_tridiag(self):
U_tri,s_tri = eig_tridiag(self.T)
U_np,s_np,_ = np.linalg.svd(self.T)
assert_almost_equal(s_tri,s_np)
assert_almost_equal(np.abs(U_tri),np.abs(U_np))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
from django.core.management import BaseCommand
from django.core.paginator import Paginator
from guardian.shortcuts import assign_perm
from grandchallenge.reader_studies.models import Answer
class Command(BaseCommand):
def handle(self, *args, **options):
answers = Answer.objects.all().select_related("creator")
paginator = Paginator(answers, 100)
print(f"Found {paginator.count} answers")
for idx in paginator.page_range:
print(f"Page {idx} of {paginator.num_pages}")
page = paginator.page(idx)
for answer in page.object_list:
assign_perm(
f"change_{answer._meta.model_name}", answer.creator, answer
)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from ... import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .alert_policy import *
from .get_alert_policy import *
from .get_group import *
from .get_metric_descriptor import *
from .get_notification_channel import *
from .get_service import *
from .get_service_level_objective import *
from .get_uptime_check_config import *
from .group import *
from .metric_descriptor import *
from .notification_channel import *
from .service import *
from .service_level_objective import *
from .uptime_check_config import *
from ._inputs import *
from . import outputs
|
from config import general
def printProgressBar(
iteration,
total,
decimals=0,
length=10,
fill="#",
printEnd="\n",
):
"""
Calls in a loop to create terminal progress bar.
Args:
iteration (int): Current iteration
        total (int): Total iterations
decimals (int): Positive number of decimals in percent complete
length (int): Character length of bar
fill (str): Bar fill character
printEnd (str): End character (e.g. "\r", "\r\n")
"""
if general["progressBar"]:
filledLength = int(length * iteration // total)
print(
f"\r|{fill * filledLength + '-' * (length - filledLength)}| "
+ f"{('{0:.' + str(decimals)+ 'f}').format(100 * (iteration / float(total)))}"
+ f"% | {total - iteration}/{total} to go",
end=printEnd,
)
if iteration == total:
print()
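# A minimal usage sketch (assumes config.general["progressBar"] is truthy):
#
#     for step in range(1, 101):
#         printProgressBar(step, 100)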
|
#!/usr/bin/env python3
# This software is Copyright (c) 2018, Dhiru Kholia <dhiru at openwall.com> and
# it is hereby released to the general public under the following terms:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted.
#
# Special thanks goes to https://github.com/NODESPLIT/tz-brute and Michael Senn
# (@MikeSenn on Telegram) for helping me bootstrap this project.
import re
import os
import sys
import json
import hashlib
import binascii
PY3 = sys.version_info[0] == 3
if not PY3:
print("This program requires Python 3.6+ to run.")
sys.exit(0)
### Borrowed code starts, The MIT License (MIT), Copyright (c) 2013 Vitalik Buterin, https://github.com/vbuterin/pybitcointools ###
def bytes_to_hex_string(b):
if isinstance(b, str):
return b
return ''.join('{:02x}'.format(y) for y in b)
def safe_from_hex(s):
return bytes.fromhex(s)
def from_int_representation_to_bytes(a):
return bytes(str(a), 'utf-8')
def from_int_to_byte(a):
return bytes([a])
def from_byte_to_int(a):
return a
def from_string_to_bytes(a):
return a if isinstance(a, bytes) else bytes(a, 'utf-8')
def safe_hexlify(a):
return str(binascii.hexlify(a), 'utf-8')
string_types = (str,)
string_or_bytes_types = (str, bytes)
int_types = (int, float)
# Base switching
code_strings = {
2: '01',
10: '0123456789',
16: '0123456789abcdef',
32: 'abcdefghijklmnopqrstuvwxyz234567',
58: '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz',
256: ''.join([chr(x) for x in range(256)])
}
def encode(val, base, minlen=0):
base, minlen = int(base), int(minlen)
code_string = get_code_string(base)
result_bytes = bytes()
while val > 0:
curcode = code_string[val % base]
result_bytes = bytes([ord(curcode)]) + result_bytes
val //= base
pad_size = minlen - len(result_bytes)
padding_element = b'\x00' if base == 256 else b'1' \
if base == 58 else b'0'
if (pad_size > 0):
result_bytes = padding_element*pad_size + result_bytes
result_string = ''.join([chr(y) for y in result_bytes])
result = result_bytes if base == 256 else result_string
return result
def decode(string, base):
if base == 256 and isinstance(string, str):
string = bytes(bytearray.fromhex(string))
base = int(base)
code_string = get_code_string(base)
result = 0
if base == 256:
def extract(d, cs):
return d
else:
def extract(d, cs):
return cs.find(d if isinstance(d, str) else chr(d))
if base == 16:
string = string.lower()
while len(string) > 0:
result *= base
result += extract(string[0], code_string)
string = string[1:]
return result
def bin_dbl_sha256(s):
bytes_to_hash = from_string_to_bytes(s)
return hashlib.sha256(hashlib.sha256(bytes_to_hash).digest()).digest()
def lpad(msg, symbol, length):
if len(msg) >= length:
return msg
return symbol * (length - len(msg)) + msg
def get_code_string(base):
if base in code_strings:
return code_strings[base]
else:
raise ValueError("Invalid base!")
def changebase(string, frm, to, minlen=0):
if frm == to:
return lpad(string, get_code_string(frm)[0], minlen)
return encode(decode(string, frm), to, minlen)
def b58check_to_bin(inp):
leadingzbytes = len(re.match('^1*', inp).group(0))
data = b'\x00' * leadingzbytes + changebase(inp, 58, 256)
assert bin_dbl_sha256(data[:-4])[:4] == data[-4:]
return data[1:-4]
### Borrowed code ends ####
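# A small sketch of the base-switching helpers borrowed above: round-trip an
# integer through base 58 and back.
#
#     s = encode(123456789, 58)
#     assert decode(s, 58) == 123456789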
if __name__ == "__main__":
if len(sys.argv) == 2: # internal https://faucet.tzalpha.net/ files testing mode
filename = sys.argv[1]
data = open(filename).read()
data = json.loads(data)
mnemonic, email, address = (" ".join(data["mnemonic"]), data["email"], data["pkh"])
raw_address = binascii.hexlify(b58check_to_bin(address)).decode("ascii")
print("%s:$tezos$1*%s*%s*%s*%s*%s" % ("dummy", 2048, mnemonic, email, address, raw_address))
sys.exit(0)
if len(sys.argv) < 4:
sys.stderr.write("Usage: %s \'mnemonic data (15 words)\' \'email\' \'public key\'\n" %
sys.argv[0])
sys.stderr.write("""\nExample: %s 'put guide flat machine express cave hello connect stay local spike ski romance express brass' 'jbzbdybr.vpbdbxnn@tezos.example.org' 'tz1eTjPtwYjdcBMStwVdEcwY2YE3th1bXyMR'\n""" % sys.argv[0])
sys.exit(-1)
mnemonic, email, address = sys.argv[1:4]
if len(email) > 51:
sys.stderr.write("[WARNING] Very large salt (email address) found, which is unsupported by tezos-opencl format!\n")
raw_address = binascii.hexlify(b58check_to_bin(address)).decode("ascii")
print("%s:$tezos$1*%s*%s*%s*%s*%s" % ("dummy", 2048, mnemonic, email, address, raw_address))
|
import torch
import math
import torch.nn.init
import torch.nn as nn
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import numpy as np
import torch.nn.functional as F
from Utils import L2Norm
def getPoolingKernel(kernel_size = 25):
step = 1. / float(np.floor( kernel_size / 2.))
x_coef = np.arange(step/2., 1. ,step)
xc2 = np.hstack([x_coef,[1], x_coef[::-1]])
kernel = np.outer(xc2.T,xc2)
kernel = np.maximum(0,kernel)
return kernel
def get_bin_weight_kernel_size_and_stride(patch_size, num_spatial_bins):
bin_weight_stride = int(round(2.0 * math.floor(patch_size / 2) / float(num_spatial_bins + 1)))
bin_weight_kernel_size = int(2 * bin_weight_stride - 1)
return bin_weight_kernel_size, bin_weight_stride
# PyTorch implementation of SIFT descriptor
class SIFTNet(nn.Module):
def CircularGaussKernel(self, kernlen=21):
halfSize = kernlen / 2
r2 = float(halfSize*halfSize)
sigma2 = 0.9 * r2
disq = 0
kernel = np.zeros((kernlen, kernlen))
        for y in range(kernlen):
            for x in range(kernlen):
disq = (y - halfSize)*(y - halfSize) + (x - halfSize)*(x - halfSize)
if disq < r2:
kernel[y,x] = math.exp(-disq / sigma2)
else:
kernel[y,x] = 0.
return kernel
def __init__(self, patch_size = 65, num_ang_bins = 8, num_spatial_bins = 4, clipval = 0.2):
super(SIFTNet, self).__init__()
gk = torch.from_numpy(self.CircularGaussKernel(kernlen=patch_size).astype(np.float32))
self.bin_weight_kernel_size, self.bin_weight_stride = get_bin_weight_kernel_size_and_stride(patch_size, num_spatial_bins)
self.gk = Variable(gk)
self.num_ang_bins = num_ang_bins
self.num_spatial_bins = num_spatial_bins
self.clipval = clipval
self.gx = nn.Sequential(nn.Conv2d(1, 1, kernel_size=(1,3), bias = False))
for l in self.gx:
if isinstance(l, nn.Conv2d):
l.weight.data = torch.from_numpy(np.array([[[[-1, 0, 1]]]], dtype=np.float32))
self.gy = nn.Sequential(nn.Conv2d(1, 1, kernel_size=(3,1), bias = False))
for l in self.gy:
if isinstance(l, nn.Conv2d):
l.weight.data = torch.from_numpy(np.array([[[[-1], [0], [1]]]], dtype=np.float32))
self.pk = nn.Sequential(nn.Conv2d(1, 1, kernel_size=(self.bin_weight_kernel_size, self.bin_weight_kernel_size),
stride = (self.bin_weight_stride, self.bin_weight_stride),
bias = False))
for l in self.pk:
if isinstance(l, nn.Conv2d):
nw = getPoolingKernel(kernel_size = self.bin_weight_kernel_size)
new_weights = np.array(nw.reshape((1, 1, self.bin_weight_kernel_size, self.bin_weight_kernel_size)))
l.weight.data = torch.from_numpy(new_weights.astype(np.float32))
def forward(self, x):
gx = self.gx(F.pad(x, (1,1,0, 0), 'replicate'))
gy = self.gy(F.pad(x, (0,0, 1,1), 'replicate'))
mag = torch.sqrt(gx * gx + gy * gy + 1e-10)
ori = torch.atan2(gy,gx + 1e-8)
if x.is_cuda:
self.gk = self.gk.cuda()
else:
self.gk = self.gk.cpu()
mag = mag * self.gk.expand_as(mag)
o_big = (ori +2.0 * math.pi )/ (2.0 * math.pi) * float(self.num_ang_bins)
bo0_big = torch.floor(o_big)
wo1_big = o_big - bo0_big
bo0_big = bo0_big % self.num_ang_bins
bo1_big = (bo0_big + 1) % self.num_ang_bins
wo0_big = (1.0 - wo1_big) * mag
wo1_big = wo1_big * mag
ang_bins = []
for i in range(0, self.num_ang_bins):
ang_bins.append(self.pk((bo0_big == i).float() * wo0_big + (bo1_big == i).float() * wo1_big))
ang_bins = torch.cat(ang_bins,1)
ang_bins = ang_bins.view(ang_bins.size(0), -1)
ang_bins = L2Norm()(ang_bins)
ang_bins = torch.clamp(ang_bins, 0.,float(self.clipval))
ang_bins = L2Norm()(ang_bins)
return ang_bins
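# A minimal usage sketch (assumed shapes): describe a single random 65x65
# patch; the descriptor length is num_ang_bins * num_spatial_bins**2 = 128.
if __name__ == "__main__":
    sift = SIFTNet(patch_size=65)
    patch = Variable(torch.rand(1, 1, 65, 65))
    descriptor = sift(patch)
    print(descriptor.size())  # (1, 128)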
|
import numpy as np
import copy
class Maze(object):
def __init__(self):
super(Maze, self).__init__()
self.n_action = 2
self.len_state = 2
self.x = 0.1
self.y = 0.1
def reset(self):
self.x = 0.1
self.y = 0.1
return self.get_state()
def set_state(self,x,y):
self.x = np.clip(x,0,1)
self.y = np.clip(y,0,1)
return self.get_state()
def get_state(self):
return np.array([self.x,self.y])
def step(self,actions):
reward = 0
done = False
x1 = np.clip(self.x + 0.2*np.clip(actions[0],-1,1),0,1)
y1 = np.clip(self.y + 0.2*np.clip(actions[1],-1,1),0,1)
if (x1>=0.2)&(x1<=0.4)&(y1>=0)&(y1<=0.8):
pass
elif (x1>=0.6)&(x1<=0.8)&(y1>=0.2)&(y1<=1.0):
pass
else:
self.x = x1
self.y = y1
if (self.x-0.9)**2+(self.y-0.9)**2 < 0.02:
reward = 10
done = True
return self.get_state(), reward, done
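# A minimal usage sketch: roll out up to 100 random steps in the maze.
if __name__ == "__main__":
    env = Maze()
    state = env.reset()
    for _ in range(100):
        state, reward, done = env.step(np.random.uniform(-1, 1, size=2))
        if done:
            print("reached the goal, reward:", reward)
            break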
|
from random import randint
from collections import OrderedDict
# never let the children number get below zero
def nozero(inteiro):
if inteiro > 0:
return inteiro
else:
return 0
lista = []
def main(num_ploxys,num_foods,min_lifespan,max_lifespan):
stringui = str(num_ploxys) + "," + str(num_foods) + "," + str(min_lifespan) + "," + str(max_lifespan)
    # here the ploxys are created; each ploxy is a list holding its x, its y, how many rounds it will live, its birthdate, its generation, and how many children it will spawn, respectively
ploxys = [[randint(1,900),randint(1,900),randint(min_lifespan,max_lifespan),randint(((-1) * (min_lifespan - 1)),0),0,randint(1,4)] for i in range(0,num_ploxys) ]
    # here the initial foods are created; each one is a list holding its x and y position
foods = [[randint(1,901),randint(1,901)] for i in range (0,num_foods)]
conta = 0
# the simulation starts
while True:
        if len(ploxys) == 0 or len(ploxys) > 1000 or conta >= 10000:
            return conta
        # each ploxy moves randomly from -5 to 5 pixels each round; if its lifespan is over, the ploxy does not go into the next round
ploxys = [ [(ploxy[0] + randint(-5,5)) % 900 ,(ploxy[1] + randint(-5,5)) % 900,ploxy[2],ploxy[3], ploxy[4],ploxy[5]] for ploxy in ploxys if ploxy[2] + ploxy[3] > conta]
        # check whether any food has been eaten; if it has, the food will be deleted
for food in foods:
eaten = False
for ploxy in ploxys:
if [ploxy[0],ploxy[1]] == food:
eaten = True
# the ploxy that ate the food, generates children and a new food is generated
for i in range(0,ploxy[5]):
ploxys.append([ploxy[0],ploxy[1],ploxy[2] + randint(-5,5),conta,ploxy[4] + 1,nozero(ploxy[5] + randint(-1,1))])
foods.append([randint(0,900),randint(0,900)])
break
            # if the food is eaten, it is removed
            if eaten:
                foods.remove(food)
        # if all the ploxys are dead, the population reached the 1000 limit, or round 10000 is reached, finish the simulation and return how many rounds they survived
        if len(ploxys) == 0 or len(ploxys) > 1000 or conta >= 10000:
            return conta
conta += 1
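# A minimal usage sketch with assumed parameters: 10 ploxys, 5 foods, and
# lifespans between 50 and 100 rounds.
if __name__ == "__main__":
    rounds_survived = main(10, 5, 50, 100)
    print(rounds_survived)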
|
# 3rd party
import lxml.etree # type: ignore
import lxml.objectify # type: ignore
# this package
from mh_utils.xml import XMLFileMixin
from tests.test_worklist_parser.test_integration import worklist_file
class MixinSubclass(XMLFileMixin):
@classmethod
def from_xml(cls, element: lxml.objectify.ObjectifiedElement):
return element
def test_from_xml_file():
assert MixinSubclass.from_xml_file(worklist_file).Version
assert MixinSubclass.from_xml_file(worklist_file).WorklistInfo is not None
assert isinstance(MixinSubclass.from_xml_file(worklist_file), lxml.objectify.ObjectifiedElement)
|
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import sys
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.module_utils.six.moves import builtins
from ansible.module_utils._text import to_native
import pprint
realimport = builtins.__import__
class TestPostgres(unittest.TestCase):
def clear_modules(self, mods):
for mod in mods:
if mod in sys.modules:
del sys.modules[mod]
@patch.object(builtins, '__import__')
def test_postgres_pg2_missing_ensure_libs(self, mock_import):
def _mock_import(name, *args, **kwargs):
if name == 'psycopg2':
raise ImportError
return realimport(name, *args, **kwargs)
self.clear_modules(['psycopg2', 'ansible.module_utils.postgres'])
mock_import.side_effect = _mock_import
mod = builtins.__import__('ansible.module_utils.postgres')
self.assertFalse(mod.module_utils.postgres.HAS_PSYCOPG2)
with self.assertRaises(mod.module_utils.postgres.LibraryError) as context:
mod.module_utils.postgres.ensure_libs(sslrootcert=None)
self.assertIn('psycopg2 is not installed', to_native(context.exception))
@patch.object(builtins, '__import__')
def test_postgres_pg2_found_ensure_libs(self, mock_import):
def _mock_import(name, *args, **kwargs):
if 'psycopg2' in name:
return MagicMock()
return realimport(name, *args, **kwargs)
self.clear_modules(['psycopg2', 'ansible.module_utils.postgres'])
mock_import.side_effect = _mock_import
mod = builtins.__import__('ansible.module_utils.postgres')
self.assertTrue(mod.module_utils.postgres.HAS_PSYCOPG2)
ensure_ret = mod.module_utils.postgres.ensure_libs(sslrootcert=None)
self.assertFalse(ensure_ret)
pprint.pprint(ensure_ret)
@patch.object(builtins, '__import__')
def test_postgres_pg2_found_ensure_libs_old_version(self, mock_import):
def _mock_import(name, *args, **kwargs):
if 'psycopg2' in name:
m = MagicMock()
m.__version__ = '2.4.1'
return m
return realimport(name, *args, **kwargs)
self.clear_modules(['psycopg2', 'ansible.module_utils.postgres'])
mock_import.side_effect = _mock_import
mod = builtins.__import__('ansible.module_utils.postgres')
self.assertTrue(mod.module_utils.postgres.HAS_PSYCOPG2)
with self.assertRaises(mod.module_utils.postgres.LibraryError) as context:
mod.module_utils.postgres.ensure_libs(sslrootcert='yes')
self.assertIn('psycopg2 must be at least 2.4.3 in order to use', to_native(context.exception))
|
"""Create database models to represent tables."""
from events_app import db
from sqlalchemy.orm import backref
import enum
class Guest(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String)
email = db.Column(db.String)
phone = db.Column(db.String)
events_attending = db.relationship("Event", secondary="guest_events")
class PartyType(enum.Enum):
Party = 1
Study = 2
Networking = 3
class Event(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String)
description = db.Column(db.String)
date_and_time = db.Column(db.DateTime)
event_type = db.Column(db.Enum(PartyType), default=PartyType.Party)
guests = db.relationship("Guest", secondary="guest_events")
guest_event_table = db.Table(
"guest_events",
db.Column("event_id", db.Integer, db.ForeignKey("event.id")),
db.Column("guest_id", db.Integer, db.ForeignKey("guest.id"))
)
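# A minimal usage sketch (assumes an application context and a datetime import
# at the call site):
#
#     event = Event(title="Study night", event_type=PartyType.Study,
#                   date_and_time=datetime(2021, 5, 1, 18, 0))
#     db.session.add(event)
#     db.session.commit()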
|
from tests.check import check_result
def test_01():
source = """
def foo(s):
if isinstance(s, unicode):
return s.encode("utf-8")
"""
expected = """
from future.utils import text_type
def foo(s):
if isinstance(s, text_type):
return s.encode("utf-8")
"""
check_result(source, expected)
def test_02():
source = """
def foo(s):
if isinstance(s, unicode):
return map(lambda c:c.encode("utf-8"), s)
"""
expected = """
from builtins import map
from future.utils import text_type
def foo(s):
if isinstance(s, text_type):
return list(map(lambda c:c.encode("utf-8"), s))
"""
check_result(source, expected)
def test_03():
source = """
def foo(d):
for k, v in d.iterkeys():
print("{}={}".format(k, v))
"""
expected = """
from future.utils import iterkeys
def foo(d):
for k, v in iterkeys(d):
print("{}={}".format(k, v))
"""
check_result(source, expected)
def test_04():
source = """
def foo(d):
for k in d.iterkeys():
print("{}".format(k))
for k, v in d.iteritems():
print("{}={}".format(k, v))
for v in d.itervalues():
print("={}".format(v))
"""
expected = """
from future.utils import iteritems, iterkeys, itervalues
def foo(d):
for k in iterkeys(d):
print("{}".format(k))
for k, v in iteritems(d):
print("{}={}".format(k, v))
for v in itervalues(d):
print("={}".format(v))
"""
check_result(source, expected)
|
#!/usr/bin/env python
import datetime
print(datetime.datetime.now())
import argparse,logging
import torch,time
from ptflops import get_model_complexity_info
import numpy as np
logger = logging.getLogger(__name__)
start = time.time()
print(datetime.datetime.now())
def main():
print('main',time.time() - start)
''' simple starter program that can be copied for use when starting a new script. '''
logging_format = '%(asctime)s %(levelname)s:%(name)s:%(message)s'
logging_datefmt = '%Y-%m-%d %H:%M:%S'
logging_level = logging.INFO
parser = argparse.ArgumentParser(description='')
parser.add_argument('-n','--runs',help='number of trials to run',type=int,required=True)
parser.add_argument('-b','--batch_size',help='batch size for inputs',type=int,default=10)
parser.add_argument('--input_width',help='width of input',type=int,default=500)
parser.add_argument('--input_height',help='height of input',type=int,default=500)
parser.add_argument('-c','--input_channels',help='number of channels of input',type=int,default=3)
parser.add_argument('-f','--filters',help='number of filters in CNN',type=int,default=64)
parser.add_argument('-k','--kernel_size',help='kernel size',type=int,default=10)
parser.add_argument('--stride',help='stride',type=int,default=1)
parser.add_argument('--padding',help='padding',type=int,default=1)
parser.add_argument('--bias',help='use bias',default=False, action='store_true')
parser.add_argument('--backward',help='run backward pass',default=False, action='store_true')
parser.add_argument('--opt',help='optimizer for backward measurements',default='adam')
parser.add_argument('--debug', dest='debug', default=False, action='store_true', help="Set Logger to DEBUG")
parser.add_argument('--error', dest='error', default=False, action='store_true', help="Set Logger to ERROR")
    parser.add_argument('--warning', dest='warning', default=False, action='store_true', help="Set Logger to WARNING")
parser.add_argument('--logfilename',dest='logfilename',default=None,help='if set, logging information will go to file')
args = parser.parse_args()
if args.debug and not args.error and not args.warning:
logging_level = logging.DEBUG
elif not args.debug and args.error and not args.warning:
logging_level = logging.ERROR
elif not args.debug and not args.error and args.warning:
logging_level = logging.WARNING
logging.basicConfig(level=logging_level,
format=logging_format,
datefmt=logging_datefmt,
filename=args.logfilename)
logger.info('start %s',time.time() - start)
run_conv2d(batch_size=args.batch_size,
input_shape=(args.input_width,args.input_height),
in_channels = args.input_channels,
out_channels = args.filters,
kernel_size = (args.kernel_size,args.kernel_size),
stride = args.stride,
padding = args.padding,
bias = args.bias,
opt = args.opt,
backward = args.backward,
runs = args.runs)
logger.info('end %s',time.time() - start)
def run_conv2d(batch_size=10,
input_shape=(500,500),
in_channels = 3,
out_channels = 64,
kernel_size = (10,10),
stride = 1,
padding = 1,
bias = True,
opt = 'adam',
backward = False,
runs = 10,
):
inputs = torch.arange(batch_size * in_channels * input_shape[0] * input_shape[1],dtype=torch.float).view((batch_size,in_channels) + input_shape)
logger.info('inputs.shape %s',inputs.shape)
#torch.rand((batch_size,in_channels) + input_shape)
targets = torch.arange(batch_size * out_channels * input_shape[0] * input_shape[1],dtype=torch.float).view((batch_size,out_channels) + input_shape)
logger.info('targets.shape %s',targets.shape)
#torch.rand((batch_size,out_channels) + input_shape)
logger.info('start run_conv2d %s',time.time() - start)
layer = torch.nn.Conv2d(in_channels,out_channels,kernel_size,stride=stride,padding=padding,bias=bias)
flops, params = get_model_complexity_info(layer, tuple(inputs.shape[1:]))
logger.info(' flops = %s params = %s',flops,params)
flops, params = get_model_complexity_info(layer,tuple(inputs.shape[1:]),as_strings=True, print_per_layer_stat=True)
logger.info('Flops: %s',flops)
logger.info('Params: %s',params)
if 'adam' in opt:
opt = torch.optim.Adam(layer.parameters())
else:
        opt = torch.optim.SGD(layer.parameters(), lr=0.01)  # SGD requires an lr; 0.01 is an assumed value
loss_func = torch.nn.MSELoss()
start_loop = time.time()
logger.info('loop run_conv2d %s',time.time() - start)
for _ in range(runs):
outputs = layer(inputs)
if backward:
loss = loss_func(outputs,targets)
loss.backward()
opt.step()
opt.zero_grad()
duration = time.time() - start_loop
logger.info('loop run time: %s and time per iteration: %s',duration,duration / runs)
if __name__ == "__main__":
main()
|
#!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
"""Baby Names exercise
Define the extract_names() function below and change main()
to call it.
For writing regex, it's nice to include a copy of the target
text for inspiration.
Here's what the html looks like in the baby.html files:
...
<h3 align="center">Popularity in 1990</h3>
....
<tr align="right"><td>1</td><td>Michael</td><td>Jessica</td>
<tr align="right"><td>2</td><td>Christopher</td><td>Ashley</td>
<tr align="right"><td>3</td><td>Matthew</td><td>Brittany</td>
...
Suggested milestones for incremental development:
-Extract the year and print it
-Extract the names and rank numbers and just print them
-Get the names data into a dict and print it
-Build the [year, 'name rank', ... ] list and print it
-Fix main() to use the extract_names list
"""
def extract_names(filename):
"""
Given a file name for baby.html, returns a list starting with the year string
followed by the name-rank strings in alphabetical order.
['2006', 'Aaliyah 91', Aaron 57', 'Abagail 895', ' ...]
"""
f = open(filename, 'r')
    names = {}
year = None
for line in f:
match_year = re.search(r'>Popularity\s+in\s+(\d\d\d\d)<', line)
match_names_rank = re.search(
r'>(\d+)</td><td>([a-zA-Z]+)</td><td>([a-zA-Z]+)<', line)
if match_year:
year = match_year.group(1)
elif match_names_rank:
            names[match_names_rank.group(2)] = match_names_rank.group(1)
            names[match_names_rank.group(3)] = match_names_rank.group(1)
result_list = [str(year)]
    for k, v in sorted(names.items(), key=lambda x: x[0]):
result_list.append(f'{k} {v}')
f.close()
return result_list
def main():
# This command-line parsing code is provided.
# Make a list of command line arguments, omitting the [0] element
# which is the script itself.
args = sys.argv[1:]
if not args:
print('usage: [--summaryfile] file [file ...]')
sys.exit(1)
# Notice the summary flag and remove it from args if it is present.
summary = False
if args[0] == '--summaryfile':
summary = True
del args[0]
if summary:
for arg in args:
with open('summary.txt', mode='a') as file:
file.write('\n'.join(extract_names(arg)))
file.write('\n\n')
else:
for arg in args:
print(extract_names(arg), '\n')
if __name__ == '__main__':
main()
|
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^treewidget/', include('treewidget.urls')),
]
from django.conf import settings
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
|
# Copyright (C) 2018 Verizon. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rest_framework import serializers
from .affected_vnfcs import AffectedVnfcsSerializer
from .affected_vls import AffectedVLsSerializer
from .affected_storages import AffectedStoragesSerializer
from lcm.nf.const import LCM_OPERATION_TYPES, LCM_OPERATION_STATE_TYPES
from .link import linkSerializer
from .response import ProblemDetailsSerializer
from .ext_virtual_link_info import ExtVirtualLinkInfoSerializer
from .vnf_info_modifications import VnfInfoModificationsSerializer
class LccnLinksSerializer(serializers.Serializer):
vnfInstance = linkSerializer(
help_text="Link to the resource representing the VNF instance to "
"which the notified change applies.",
required=True,
allow_null=False)
subscription = linkSerializer(
help_text="Link to the related subscription.",
required=True,
allow_null=False)
vnfLcmOpOcc = linkSerializer(
help_text="Link to the VNF lifecycle management operation"
"occurrence that this notification is related to. Shall be"
"present if there is a related lifecycle operation occurance.",
required=False,
allow_null=False)
class VnfLcmOperationOccurrenceNotification(serializers.Serializer):
id = serializers.CharField(
help_text="Identifier of this notification",
max_length=255,
required=True,
allow_null=False)
notificationType = serializers.CharField(
help_text="Type of the notification",
max_length=50,
required=True,
allow_null=False)
subscriptionId = serializers.CharField(
help_text="Identifier for the subscription",
required=False)
timeStamp = serializers.CharField(
help_text="Date-time of the generation of the notification.",
required=True)
notificationStatus = serializers.ChoiceField(
help_text="Indicates whether this notification reports about the start"
"of a lifecycle operation or the result of a lifecycle"
"operation",
choices=["START", "RESULT"],
required=True)
operationState = serializers.ChoiceField(
choices=LCM_OPERATION_STATE_TYPES,
help_text="The state of the VNF LCM operation occurrence. ",
required=True)
vnfInstanceId = serializers.CharField(
help_text="The identifier of the VNF instance affected. ",
required=True)
operation = serializers.ChoiceField(
help_text="The lifecycle management operation.",
required=True,
choices=LCM_OPERATION_TYPES)
isAutomaticInvocation = serializers.BooleanField(
help_text="Set to true if this VNF LCM operation occurrence has"
"been triggered by an automated procedure inside the"
"VNFM. Otherwise False",
required=True)
vnfLcmOpOccId = serializers.CharField(
help_text="The identifier of the VNF lifecycle management"
"operation occurrence associated to the notification.",
required=True)
affectedVnfcs = AffectedVnfcsSerializer(
help_text="Information about VNFC instances that were affected " +
"during the lifecycle operation.",
required=False,
many=True
)
affectedVirtualLinks = AffectedVLsSerializer(
help_text="Information about VL instances that were affected " +
"during the lifecycle operation. ",
required=False,
many=True
)
affectedVirtualStorages = AffectedStoragesSerializer(
help_text="Information about virtualised storage instances that " +
"were affected during the lifecycle operation",
required=False,
many=True
)
changedInfo = VnfInfoModificationsSerializer(
help_text="Information about the changed VNF instance information, " +
"including VNF configurable properties",
required=False,
allow_null=True)
changedExtConnectivity = ExtVirtualLinkInfoSerializer(
help_text="Information about changed external connectivity, if this " +
"notification represents the result of a lifecycle operation occurrence. " +
"Shall be present if the 'notificationStatus' is set to 'RESULT' and the " +
"'operation' is set to 'CHANGE_EXT_CONN'. Shall be absent otherwise.",
many=True,
required=False,
allow_null=True)
error = ProblemDetailsSerializer(
help_text="If 'operationState' is 'FAILED_TEMP' or 'FAILED' or " +
"'PROCESSING' or 'ROLLING_BACK' and previous value of 'operationState' " +
"was 'FAILED_TEMP' this attribute shall be present ",
allow_null=True,
required=False
)
_links = LccnLinksSerializer(
help_text="Links to resources related to this resource.",
required=True)
|
def valid_cross_sale_offer_with_charge():
return {
"charge": {
"amount": 1000,
"currency": "EUR",
},
"title": "Test Title",
"description": "Test Description",
"termsAndConditionsUrl": "https://github.com/securionpay",
"template": "text_only",
"companyName": "SecurionPay Tests",
"companyLocation": "CH",
}
|
from constants.nodetype import TYPE_GATEWAY, TYPE_NODE
from meshnet.heartbeat import Heartbeat
from meshnet.routing import Routing, RoutingEntry
from util.debug import dbg, routing_dbg
from util.nodetype import NodeType
# Initialize Routing
routing = Routing()
routing.add(RoutingEntry(b'\x01\x01\x03', NodeType(TYPE_NODE)))
routing.add(RoutingEntry(b'\x01\x02\x03', NodeType(TYPE_GATEWAY)))
# Create Message
message = Heartbeat().to_message()
print(message.to_bytearray())
dbg(message)
print("Processing heartbeat")
heartbeat = Heartbeat().from_bytearray(message.content)
heartbeat.node_type = NodeType(TYPE_NODE)
heartbeat.proccess()
routing_dbg()
|
import pyodbc
import time
from datetime import timezone
import datetime
import sys
import glob
import boto3
from botocore.errorfactory import ClientError
import pandas as pd
import csv
import s3fs
import os
## Fetch the arguments
#server = sys.argv[1]
DBName = sys.argv[1]
LSBucket=sys.argv[2]
## The server name is hard-coded here, but you can choose to pass it as an argument
server = '<servername>'
srvname = server.partition(".")[0]
database = 'master'
username = '<username>'
password = '<pwd>'
CSVLogfile = srvname+"_"+DBName+"_CSVLog.csv"
### Create a CSV Log file and upload it to S3 #####
s3 = boto3.client('s3')
try:
s3.head_object(Bucket=LSBucket, Key=CSVLogfile)
except ClientError:
# Not found
with open(CSVLogfile, 'w', newline='') as file:
writer = csv.writer(file)
writer.writerow(["ServerName", "DBName", "RunTime(UTC)", "LastRestoredFile", "LastRestoredDate", "OutputLog", "ReplicationLag(mins)"])
with open(CSVLogfile, "rb") as f:
s3.upload_fileobj(f, LSBucket, CSVLogfile)
###Function write to csv
def write_to_csv(server, DBName, utc_time, lastrestoredobject, lastrestoreddate, OutputMSG, time_Delta, LSBucket, CSVLogfile):
#download file from s3 to tmp
s3 = boto3.resource('s3')
obj = s3.Object(LSBucket, CSVLogfile)
obj.download_file('templog.csv')
my_bucket = s3.Bucket(LSBucket)
# list you want to append
lists = [server, DBName, utc_time, lastrestoredobject, lastrestoreddate, OutputMSG, time_Delta]
with open('templog.csv','r') as infile:
reader = list(csv.reader(infile))
reader = reader[::-1] # the date is ascending order in file
reader.insert(0,lists)
with open('templog.csv', 'w', newline='') as outfile:
writer = csv.writer(outfile)
for line in reversed(reader): # reverse order
writer.writerow(line)
#upload file from tmp to s3 key
my_bucket.upload_file('templog.csv', CSVLogfile)
os.remove('templog.csv')
s3 = boto3.resource('s3')
my_bucket = s3.Bucket(LSBucket)
utc_time = datetime.datetime.now(timezone.utc)
### Connect to SQL
try:
cnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER='+server+';DATABASE='+database+';UID='+username+';PWD='+ password,autocommit=True)
cursor = cnxn.cursor()
except:
e = sys.exc_info()[0]
print( "Error: %s" % e )
lastrestoredobject = "N\A"
lastrestoreddate = "N\A"
OutputMSG = "Failed to connect to target SQL Server"
time_Delta = "N\A"
write_to_csv(server, DBName, utc_time, lastrestoredobject, lastrestoreddate, OutputMSG, time_Delta, LSBucket, CSVLogfile)
raise
###### Get the last file restored on target database ###############
query = ("exec msdb.dbo.rds_task_status @db_name = ?")
# execute the query and read to a dataframe in Python
data = pd.read_sql(sql=query, con=cnxn, params=[DBName])
# filter data with query method
data.query('lifecycle == "SUCCESS"', inplace = True)
maxClm = data['task_id'].max()
data.query ('task_id == @maxClm', inplace = True)
lastrestoredobject = data.S3_object_arn.values[0]
lastrestoredfile = lastrestoredobject.partition("/")[2]
# Get the date of this restored file
for my_bucket_object in my_bucket.objects.all():
if my_bucket_object.key == lastrestoredfile:
lastrestoreddate = my_bucket_object.last_modified
###### Get all the files in S3 dated after above file ################
unsortedS3 = []
suffix = 'trn'
for my_bucket_object in my_bucket.objects.all():
if my_bucket_object.last_modified > lastrestoreddate:
if my_bucket_object.key.endswith(suffix):
#print('{0}:{1}'.format(my_bucket.name, my_bucket_object.key))
unsortedS3.append(my_bucket_object)
if not unsortedS3:
OutputMSG = "There are no new files to restore"
else:
###### find the oldest file ################
sortedS3 = [obj.key for obj in sorted(unsortedS3, key=lambda x: x.last_modified)][0:9]
oldesttrnfile = sortedS3[0]
nextrestorefile = 'arn:aws:s3:::'+LSBucket+'/'+oldesttrnfile
####### Restore the file ##############
## Check if there is any other process in progress ?
restore_query = """exec msdb.dbo.rds_restore_log
@restore_db_name=?,
@s3_arn_to_restore_from=?,
@with_norecovery=1;"""
restore_args = (DBName,nextrestorefile)
cursor.execute(restore_query,restore_args)
OutputMSG = "Ran restore for file: "+nextrestorefile
time_delta = (utc_time - lastrestoreddate)
time_Delta= ((time_delta.total_seconds())/60)
write_to_csv(server, DBName, utc_time, lastrestoredobject, lastrestoreddate, OutputMSG, time_Delta, LSBucket, CSVLogfile)
###### Alert if > 15 mins ###########
sns = boto3.client('sns')
snsmessage = (server+".\n"+"LogShipping is out of sync for database: "+DBName+".\n"+"Lag time (mins):"+str(time_Delta))
if time_Delta > 15:
response = sns.publish(
TopicArn='<topicname>',
Subject=("LOGSHIPPING IS OUT OF SYNC"),
Message=(snsmessage)
)
|
# Generated by Django 2.2.1 on 2019-05-31 07:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('read_only_site', '0004_auto_20190531_0700'),
]
operations = [
migrations.AlterField(
model_name='match',
name='match_identifier',
field=models.CharField(default=None, max_length=200),
),
]
|
__version__ = '0.1.5'
from .convert import (
adjac_to_nested, set_attr, get_subtree, adjac_to_nested_with_attr)
from .extract import (
remove_node_ids, extract_subtrees, trunc_leaves, drop_nodes, replace_attr)
from .encode import (
to_hash, unique_trees)
from .shingleset import shingleset
|
# The Hazard Library
# Copyright (C) 2012-2016 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openquake.hazardlib.gsim.chiou_youngs_2008_swiss import (
ChiouYoungs2008SWISS01,
ChiouYoungs2008SWISS06,
ChiouYoungs2008SWISS04)
from openquake.hazardlib.tests.gsim.utils import BaseGSIMTestCase
class ChiouYoungs2008SWISS01TestCase(BaseGSIMTestCase):
GSIM_CLASS = ChiouYoungs2008SWISS01
def test_std_total(self):
self.check('CY08Swiss/cy_2008_phis_ss_embeded.csv',
max_discrep_percentage=2.00)
def test_mean_hanging_wall_normal_slip(self):
self.check('CY08Swiss/CY08_MEDIAN_MS_HW_NM_VsK-1.csv',
max_discrep_percentage=0.80)
def test_mean_hanging_wall_reversed_slip(self):
self.check('CY08Swiss/CY08_MEDIAN_MS_HW_RV_VsK-1.csv',
max_discrep_percentage=0.80)
def test_mean_hanging_wall_strike_slip(self):
self.check('CY08Swiss/CY08_MEDIAN_MS_HW_SS_VsK-1.csv',
max_discrep_percentage=0.80)
class ChiouYoungs2008SWISS06TestCase(BaseGSIMTestCase):
GSIM_CLASS = ChiouYoungs2008SWISS06
def test_std_total(self):
self.check('CY08Swiss/cy_2008_phis_ss_embeded.csv',
max_discrep_percentage=2.00)
def test_mean_hanging_wall_normal_slip(self):
self.check('CY08Swiss/CY08_MEDIAN_MS_HW_NM_VsK-6.csv',
max_discrep_percentage=0.80)
def test_mean_hanging_wall_reversed_slip(self):
self.check('CY08Swiss/CY08_MEDIAN_MS_HW_RV_VsK-6.csv',
max_discrep_percentage=0.80)
def test_mean_hanging_wall_strike_slip(self):
self.check('CY08Swiss/CY08_MEDIAN_MS_HW_SS_VsK-6.csv',
max_discrep_percentage=0.80)
class ChiouYoungs2008SWISS04TestCase(BaseGSIMTestCase):
GSIM_CLASS = ChiouYoungs2008SWISS04
def test_std_total(self):
self.check('CY08Swiss/cy_2008_phis_ss_embeded.csv',
max_discrep_percentage=2.00)
def test_mean_hanging_wall_normal_slip(self):
self.check('CY08Swiss/CY08_MEDIAN_MS_HW_NM_VsK-4.csv',
max_discrep_percentage=0.80)
def test_mean_hanging_wall_reversed_slip(self):
self.check('CY08Swiss/CY08_MEDIAN_MS_HW_RV_VsK-4.csv',
max_discrep_percentage=0.80)
def test_mean_hanging_wall_strike_slip(self):
self.check('CY08Swiss/CY08_MEDIAN_MS_HW_SS_VsK-4.csv',
max_discrep_percentage=0.80)
|
import random
from typing import List, Optional, Union
import jira
import logging
from jira import JIRAError
from time import sleep
from .ticket import Ticket
from .utils import ObliviousCookieJar
from ...common.enum import to_enumable
DEV_STATUS_API_PATH = "{server}/rest/dev-status/latest/{path}"
JIRA_SERVER_ERRORS_TO_RETRY = (500, 504)
class JiraClient(jira.client.JIRA):
def __init__(self, server: str, username: str, password: str, lazy: bool = False, *args, **kwargs):
        self._current_project: Optional[str] = kwargs.pop("default_project", None)
super().__init__(basic_auth=(username, password),
options={"server": server,
"verify": kwargs.pop("ca_bundle", True)},
*args, **kwargs)
self.server: str = server
self._session.cookies = ObliviousCookieJar()
self._current_user: str = username
        self._current_ticket: Optional[Ticket] = None
self.TicketTypes = None
self.Priorities = None
self.Components = None
self.Projects = None
self.FixVersions = None
self.FieldsByKey = None
self.FieldsByName = None
self.Statuses = None
self.EpicLinks = None
if not lazy:
self.load()
def load(self):
if not self.TicketTypes:
self.TicketTypes = to_enumable("TicketTypes", "name", "name", self.issue_types())
if not self.Priorities:
self.Priorities = to_enumable("Priority", "name", "name", self.priorities())
if not self.Projects:
self.Projects = to_enumable("Projects", "name", "key", self.projects())
if not self.FieldsByName:
self.FieldsByName = to_enumable("Fields", "name", "id", self.fields())
if not self.FieldsByKey:
self.FieldsByKey = to_enumable("Fields", "id", "name", self.fields(), clean_values=True)
if not self.Statuses:
self.Statuses = to_enumable("Statuses", "name", "id", self.statuses())
if self._current_project:
if not self.Components:
self.Components = to_enumable("Components", "name", "id",
self.project_components(self._current_project))
if not self.FixVersions:
self.FixVersions = to_enumable("FixVersions", "name", "name",
self.project_versions(self._current_project))
def set_project(self, project: str) -> None:
self._current_project = project
def set_ticket(self, key: str, **kwargs) -> None:
self._current_ticket = Ticket(client=self, key=key, **kwargs)
def get_ticket(self, key: str = None, **kwargs) -> Ticket:
if ("http://" in key or "https://" in key) and r"/browse/" in key:
key = key.split(r"/")[-1]
if "|" in key:
key = key.split("|")[0]
return Ticket(client=self, key=key, **kwargs) if key else self._current_ticket
def search_tickets(self, jql: str, fields=None, *args, **kwargs):
try:
jira_issues = self.search_issues(jql, fields=fields, *args, **kwargs)
except JIRAError as e:
if e.status_code in JIRA_SERVER_ERRORS_TO_RETRY: # Jira sometimes returns internal server error, retry once if so
logging.error('Retrying: %s' % e.__str__())
sleep(random.randint(1, 5))
jira_issues = self.search_issues(jql, fields=fields, *args, **kwargs) # raise if 500 returned again
else:
raise
return [self.get_ticket(issue.key, fields=fields) for issue in jira_issues]
def comment(self, message: str, ticket_key: str = None) -> None:
self.get_ticket(key=ticket_key).comment(message)
def create_ticket(self,
summary: str,
ticket_type: str = "Task",
description: str = "",
project: str = None,
assignee: str = None,
reporter: str = None,
priority: str = None,
components: Optional[Union[List[str], str]] = None,
fix_versions: Optional[Union[List[str], str]] = None,
**kwargs) -> Ticket:
fields_kwargs = dict(summary=summary,
project={'key': project or self._current_project},
issuetype={'name': ticket_type},
description=description)
        if assignee:
            fields_kwargs["assignee"] = {'name': assignee}
        if reporter:
            fields_kwargs["reporter"] = {'name': reporter}
if priority:
fields_kwargs["priority"] = {'name': priority}
if fix_versions:
            if not isinstance(fix_versions, list):
                fix_versions = [self.FixVersions.get(fv) for fv in fix_versions.split(",")]
fields_kwargs["fixVersions"] = [{"name": fv} for fv in fix_versions]
if components:
if not isinstance(components, list):
components = [self.Components.get(cm) for cm in components.split(",")]
fields_kwargs["components"] = [{"id": cm} for cm in components]
fields_kwargs.update(kwargs)
issue = self.create_issue(**fields_kwargs)
return self.get_ticket(issue.key)
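# A minimal usage sketch (hypothetical server and credentials):
#
#     client = JiraClient("https://jira.example.com", "user", "secret",
#                         default_project="PROJ")
#     ticket = client.create_ticket("Fix the build", ticket_type="Bug",
#                                   description="CI is red")
#     client.comment("Investigating", ticket_key=ticket.key)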
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# wait for the WebDriver to locate an element by the given XPath (up to 10 seconds)
def waitWebDriver(driver, xpath):
return WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.XPATH, xpath))
)
def waitWebDriverByClass_name(driver, className):
return WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.CLASS_NAME, className))
)
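# A minimal usage sketch (hypothetical URL and locator):
#
#     driver = webdriver.Chrome()
#     driver.get("https://example.com")
#     waitWebDriver(driver, "//button[@id='submit']").click()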
|
import sys
w, h = map(int, sys.stdin.readline().split())
def main():
    if w * 9 == h * 16:  # exact integer comparison instead of float division
return "16:9"
return "4:3"
if __name__ == "__main__":
ans = main()
print(ans)
|
from app import db
from datetime import datetime, timedelta
"""
the fine for each extra day after the due date
"""
EXTRA_PER_DAY_FINE = 2
class Transaction(db.Model):
__tablename__ = 'transaction'
id = db.Column(db.Integer, primary_key=True)
returned = db.Column(db.Boolean)
    issue_date = db.Column(db.Date, default=lambda: datetime.today().date())
due_date = db.Column(db.Date, default=None)
return_date = db.Column(db.Date, default=None)
fees = db.Column(db.Integer)
# foreign keys
member_id = db.Column(db.Integer, db.ForeignKey('member.id'))
book_instance_id = db.Column(db.Integer, db.ForeignKey('book_instance.id'))
# back populates
book_instance = db.relationship('BookInstance', back_populates='transactions')
member = db.relationship('Member', back_populates='transactions')
    def __repr__(self):
        return '<Transaction id: {}, member_id: {}, returned: {}>'.format(self.id, self.member_id, self.returned)
def to_json(self, calculate_fees=False):
if calculate_fees:
self.calculate_fees()
json = {
'id': self.id,
'returned': self.returned,
'issue_date': self.issue_date,
'due_date': self.due_date,
'return_date': self.return_date,
'fees': self.fees,
'member_id': self.member_id,
'book_instance_id': self.book_instance_id
}
return json
@staticmethod
def to_json_many(transaction_list, calculate_fees=False):
json_list = []
for transaction in transaction_list:
json_list.append(transaction.to_json(calculate_fees))
return json_list
"""
calculate_fees(): calculates the dynamic fees
new_fees = base_fees + EXTRA_PER_DAY_FINE * extra_days
"""
def calculate_fees(self):
self.fees = BookDetail.query.get(self.book_instance.book_detail_id).base_fees
if not self.returned:
period = (datetime.today().date() - self.issue_date).days
else:
period = (self.return_date - self.issue_date).days
        if self.issue_date + timedelta(days=period) > self.due_date:
            # days beyond the due date: subtract the allowed loan period from the total period
            extra_days = period - (self.due_date - self.issue_date).days
            self.fees += extra_days * EXTRA_PER_DAY_FINE
db.session.add(self)
db.session.commit()
if not self.returned:
return self.fees
    def issue_book(self, book_instance_id, member_id, issue_period, issue_date=None):
        # default evaluated per call rather than once at function definition time
        if issue_date is None:
            issue_date = datetime.today().date()
        # Doing checks to find out if the issue transaction is valid or not
        # 1: parameter logic check
        book_instance = BookInstance.query.get(book_instance_id)
member = Member.query.get(member_id)
if book_instance is None:
return {"validity": False, "err_msg": 'book_instance does not exist'}
if member is None:
return {"validity": False, "err_msg": 'member does not exist'}
if issue_period <= 0:
return {"validity": False, "err_msg": 'invalid issue_period'}
if issue_date > datetime.today().date():
return {"validity": False, "err_msg": 'invalid issue_date'}
# 2: check if book_instance is available
if not book_instance.is_available:
return {"validity": False, "err_msg": "book unavailable"}
# 3: check if member unbilled < 500
if member.unbilled >= 500:
return {"validity": False, "err_msg": "max debt reached"}
self.returned = False
self.member = member
self.book_instance = book_instance
self.issue_date = issue_date
self.due_date = issue_date + timedelta(days=issue_period)
self.book_instance.is_available = False
self.member.books_taken += 1
db.session.add(self)
db.session.commit()
book_detail = BookDetail.query.get(self.book_instance.book_detail_id)
book_detail.update_stock(-1)
return {"validity": True}
    def return_book(self, return_date=None):
        # default evaluated per call rather than once at function definition time
        if return_date is None:
            return_date = datetime.today().date()
        self.returned = True
self.return_date = return_date
self.calculate_fees()
self.book_instance.is_available = True
self.member.unbilled = 0
self.member.total_paid += self.fees
db.session.add(self)
db.session.commit()
book_detail = BookDetail.query.get(self.book_instance.book_detail_id)
book_detail.update_popularity(self.fees)
book_detail.update_stock(1)
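# imported at the bottom to avoid a circular import between the models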
from app.books.models import BookDetail, BookInstance
from app.users.models import Member
|
import os
import shutil
import time
import datetime
import tweepy
import config
import undetected_chromedriver as uc
import pandas as pd
import plotly.graph_objects as go
from geopy.geocoders import Nominatim
from selenium import webdriver
from selenium.webdriver.common.by import By
geolocator = Nominatim(user_agent="mass_geocode")
auth = tweepy.OAuthHandler(config.consumer_key, config.consumer_secret)
auth.set_access_token(config.access_token, config.access_token_secret)
bearer_token = config.bearer_token
api = tweepy.API(auth)
client = tweepy.Client(bearer_token=config.bearer_token)
options = webdriver.ChromeOptions()
options.add_argument("--disable-extensions")
driver = uc.Chrome(options=options, use_subprocess=True)
driver.get("https://www.gunviolencearchive.org/last-72-hours")
driver.maximize_window()
time.sleep(10)
folder = driver.find_element(By.XPATH, '//a[@class="button"]')
folder.click()
time.sleep(10)
# folder = driver.find_element(By.XPATH, '//a[@class="button big"]')
folder = driver.find_element(By.XPATH, "//a[text()='Download']")
time.sleep(10)
folder.click()
time.sleep(30)
driver.quit()
new_filename = "72hoursdownloaded.csv"
filepath = "c:\\Users\\Joe Wilson\\Downloads"
# the newest file in the Downloads folder is assumed to be the export just triggered
filename = max(
    [os.path.join(filepath, f) for f in os.listdir(filepath)], key=os.path.getctime
)
shutil.move(filename, new_filename)
df = pd.read_csv(new_filename)
df["query"] = ""
df["query"] = df["City Or County"] + " " + df["State"]
df["lat"] = ""
df["long"] = ""
df["address"] = ""
length = len(df)
print("Geocoding...")
for i in df.index:
try:
location = geolocator.geocode(df["query"][i])
df.loc[i, "lat"] = location.latitude
df.loc[i, "long"] = location.longitude
df.loc[i, "address"] = location.address
print(f"{i} of {length}")
    except Exception:  # geocoding can fail or return None; leave the row blank
df.loc[i, "lat"] = ""
df.loc[i, "long"] = ""
df.loc[i, "address"] = ""
print("Geocoding Complete..")
print(df.head())
df["victims"] = ""
print("Calculating Victims...")
victims = 0
for i in df.index:
killed = df.loc[i, "# Killed"]
injured = df.loc[i, "# Injured"]
victims = killed + injured
df.loc[i, "victims"] = victims
print(f"{i} of {length}")
print("calculating Victims Complete...")
print("Summing Victims...")
sum = df["victims"].sum()
print(f"Sum of Victims: {sum}")
print("Summing Victims Complete...")
temp_file = "72final.csv"
df.to_csv(temp_file)
df = pd.read_csv(temp_file)
print("Creating Plot...")
fig = go.Figure(
data=go.Scattergeo(
lon=df["long"],
lat=df["lat"],
text=df["query"],
mode="markers",
marker_size=df["victims"] * 8,
marker_color="darkred",
)
)
fig.update_geos(
visible=True,
resolution=110,
scope="usa",
showcountries=True,
countrycolor="Black",
showsubunits=True,
subunitcolor="Black",
showocean=True,
oceancolor="Blue",
showlakes=True,
lakecolor="Blue",
showrivers=True,
riverwidth=2,
rivercolor="Blue",
framecolor="Black",
countrywidth=3,
bgcolor="darkorange",
)
fig.update_layout(
title=f"Gun Violence Last 72 Hours - {sum} Victims",
title_x=0.5,
title_xanchor="center",
title_y=0.93,
paper_bgcolor="darkorange",
font=dict(color="Black", size=60, family="Bernard MT Condensed"),
)
fig.add_annotation(
dict(
font=dict(color="black", size=40, family="Arial"),
x=0,
y=-0,
showarrow=False,
text="Source: www.gunviolencearchive.org",
textangle=0,
xanchor="left",
xref="paper",
yref="paper",
)
)
now = datetime.datetime.now()
fig.add_annotation(
dict(
font=dict(color="black", size=40, family="Arial"),
x=0.95,
y=-0,
showarrow=False,
text=now.strftime("%m-%d-%Y"),
textangle=0,
xanchor="right",
xref="paper",
yref="paper",
)
)
fig.write_image("72hoursgunviolence.png", width=1500, height=1000)
print("Creating Plot Complete...")
tweet_filename = "72hoursgunviolence.png"
media = api.media_upload(tweet_filename)
tweet = "Gun Violence Last 72 Hours #VoteThemOut #EndGunViolence #moleg"
post_result = api.update_status(status=tweet, media_ids=[media.media_id])
time.sleep(2)
if os.path.exists(new_filename):
os.remove(new_filename)
if os.path.exists(temp_file):
os.remove(temp_file)
if os.path.exists(tweet_filename):
os.remove(tweet_filename)
|
from typing import (
Any,
Iterable,
)
from eth_utils import (
is_bytes,
to_tuple,
)
import rlp
from rlp.codec import _apply_rlp_cache, consume_item
from rlp.exceptions import DecodingError
from rlp.sedes.lists import is_sequence
@to_tuple
def decode_all(rlp: bytes,
sedes: rlp.Serializable = None,
recursive_cache: bool = False,
**kwargs: Any) -> Iterable[Any]:
"""Decode multiple RLP encoded object.
If the deserialized result `obj` has an attribute :attr:`_cached_rlp` (e.g. if `sedes` is a
subclass of :class:`rlp.Serializable`) it will be set to `rlp`, which will improve performance
on subsequent :func:`rlp.encode` calls. Bear in mind however that `obj` needs to make sure that
this value is updated whenever one of its fields changes or prevent such changes entirely
(:class:`rlp.sedes.Serializable` does the latter).
:param sedes: an object implementing a function ``deserialize(code)`` which will be applied
after decoding, or ``None`` if no deserialization should be performed
:param **kwargs: additional keyword arguments that will be passed to the deserializer
:param strict: if false inputs that are longer than necessary don't cause an exception
:returns: the decoded and maybe deserialized Python object
:raises: :exc:`rlp.DecodingError` if the input string does not end after the root item and
`strict` is true
:raises: :exc:`rlp.DeserializationError` if the deserialization fails
"""
if not is_bytes(rlp):
raise DecodingError('Can only decode RLP bytes, got type %s' % type(rlp).__name__, rlp)
end = 0
rlp_length = len(rlp)
while rlp_length - end > 0:
try:
item, per_item_rlp, end = consume_item(rlp, end)
except IndexError:
raise DecodingError('RLP string too short', rlp)
if sedes:
obj = sedes.deserialize(item, **kwargs)
if is_sequence(obj) or hasattr(obj, '_cached_rlp'):
_apply_rlp_cache(obj, per_item_rlp, recursive_cache)
yield obj
else:
yield item
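# Usage sketch: two RLP items concatenated in one byte string decode back
# into a tuple (thanks to @to_tuple).
#     >>> payload = rlp.encode(b'cat') + rlp.encode(b'dog')
#     >>> decode_all(payload)
#     (b'cat', b'dog')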
|
import collections
from thegame.abilities import Ability
Vector = collections.namedtuple('Vector', ('x', 'y'))
Vector.__doc__ = '''
A 2D vector.
Used to represent a point and velocity in thegame
'''
class _EntityAttribute:
def __init__(self, doc=None):
self.__doc__ = doc
def __set_name__(self, klass, name):
self.name = name
def __get__(self, instance, klass=None):
if instance is None:
return self
return getattr(instance.data.entity, self.name)
def __set__(self, obj, value):
raise AttributeError(f'read-only attribute {self.name!r}')
class _DataAttribute:
def __init__(self, doc=None):
self.__doc__ = doc
def __set_name__(self, klass, name):
self.name = name
def __get__(self, instance, klass=None):
if instance is None:
return self
return getattr(instance.data, self.name)
def __set__(self, obj, value):
raise AttributeError(f'read-only attribute {self.name!r}')
class Entity:
def __init__(self, data):
self.data = data
def __repr__(self):
return (
f'<{self.__class__.__name__}#{self.id} '
f'BD={self.body_damage} '
f'HP={self.health}/{self.max_health} '
f'@({self.position.x:.0f},{self.position.y:.0f})>'
)
id = _EntityAttribute('The id of the entity')
@property
def position(self):
'''
The position of the entity in a 2-tuple (x, y).
'''
p = self.data.entity.position
return Vector(p.x, p.y)
@property
def velocity(self):
'''
The velocity of the entity in a 2-tuple (x, y).
'''
v = self.data.entity.velocity
return Vector(v.x, v.y)
radius = _EntityAttribute('The radius of the entity')
health = _EntityAttribute(
'''
        The health of the entity as a non-negative integer.
        When an entity's health is less than or equal to zero it dies,
        and the one dealing the killing blow is rewarded with
        ``rewarding_experience``.
'''
)
body_damage = _EntityAttribute(
'''
The body damage of the entity.
When two entities collide, they reduce each other's health
with their body damage.
'''
)
rewarding_experience = _EntityAttribute(
'''
How much experience you will get if you kill this entity.
'''
)
max_health = _EntityAttribute(
'''
The maximum health of this entity.
'''
)
class Polygon(Entity):
'''
    The neutral polygons.
'''
@property
def edges(self):
'''
        How many edges the polygon has.
'''
return self.data.edges
class Bullet(Entity):
'''
The bullet. Shot from a Hero.
'''
@property
def owner_id(self):
'''
The id of the hero owning the bullet
'''
return self.data.owner
HeroAbility = collections.namedtuple(
'HeroAbility',
['level', 'value']
)
HeroAbilityList = collections.namedtuple(
'HeroAbilityList',
[ab.as_camel for ab in Ability]
)
class _HeroAbilityShortcut:
def __init__(self, ability):
self.ability = ability
self.__doc__ = \
f'shortcut to ``hero.abilities.{ability.as_camel}.value``'
def __get__(self, instance, klass=None):
if instance is None:
return self
return instance.abilities[self.ability].value
    def __set__(self, obj, value):
        # use the ability's own name; this descriptor never defines self.name
        raise AttributeError(f'read-only attribute {self.ability.as_camel!r}')
class _HeroAbilityLevelShortcut:
def __init__(self, ability):
self.ability = ability
self.__doc__ = \
f'shortcut to ``hero.abilities.{ability.as_camel}.level``'
def __get__(self, instance, klass=None):
if instance is None:
return self
return instance.abilities[self.ability].level
    def __set__(self, obj, value):
        # use the ability's own name; this descriptor never defines self.name
        raise AttributeError(f'read-only attribute {self.ability.as_camel!r}')
class _HeroMeta(type):
@classmethod
def __prepare__(mcs, name, bases, **kwds):
return {
**{
ab.as_camel: _HeroAbilityShortcut(ab)
for ab in Ability
},
**{
ab.as_camel + '_level': _HeroAbilityLevelShortcut(ab)
for ab in Ability
}
}
class Hero(Entity, metaclass=_HeroMeta):
'''
A Hero is a player in thegame.
'''
def __init__(self, data):
super().__init__(data)
        # we're doing this so it will not be modified accidentally
        # maybe not a good way, though.
self.__dict__['abilities'] = HeroAbilityList(
*[HeroAbility(*x) for x in zip(
self.data.ability_levels, self.data.ability_values)]
)
@property
def abilities(self):
'''
returns a tuple of abilities.
Example::
hero.abilities[MaxHealth].value # get the hero's max health
hero.abilities.max_health.value # the same thing
hero.abilities[MaxHealth].level # get the ability level
hero.abilities.max_health.level # the same thing again
'''
return self.__dict__['abilities']
orientation = _DataAttribute(
'''
    The orientation of the hero; the direction the barrel is facing,
in radians.
'''
)
level = _DataAttribute('The level of the hero')
score = _DataAttribute('The score of the hero')
experience = _DataAttribute('The experience the hero has')
experience_to_level_up = _DataAttribute(
'The experience required for the hero to level up')
skill_points = _DataAttribute(
'Number of skill points available to level up abilities'
)
cooldown = _DataAttribute(
'''
How many ticks until a bullet is ready.
Increase the *reload* ability to reduce the cooldown.
``shoot`` and ``shoot_at`` can still be called when on cooldown, but
nothing will happen instead.
'''
)
health_regen_cooldown = _DataAttribute(
'''
How many ticks until the hero can start to regenerate health
'''
)
name = _DataAttribute(
'''
    The name of the hero. Not guaranteed to be unique.
'''
)
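# Usage sketch (assuming `hero` is a Hero instance received from the game;
# the shortcut names come from Ability.as_camel via _HeroMeta):
# hero.max_health        # shortcut for hero.abilities.max_health.value
# hero.max_health_level  # shortcut for hero.abilities.max_health.level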
|
# -*- coding: utf-8 -*-
import json
from plivo.base import (ListResponseObject)
from plivo.exceptions import ValidationError
from plivo.resources import MultiPartyCall, MultiPartyCallParticipant
from plivo.utils.signature_v3 import construct_get_url
from tests.base import PlivoResourceTestCase
from tests.decorators import with_response
class MultiPartyCallsTest(PlivoResourceTestCase):
def __assert_requests(self, expected_url, expected_method, expected_request_body=None, actual_response=None):
self.maxDiff = None
# Verifying the api hit
self.assertEqual(expected_url, self.client.current_request.url)
# Verifying the method used
self.assertEqual(expected_method, self.client.current_request.method)
if expected_request_body:
# Verifying the request body sent
self.assertDictEqual(expected_request_body, json.loads(self.client.current_request.body.decode('utf-8')))
if actual_response:
# Verifying the mock response
self.assertResponseMatches(actual_response)
def test_add_participant_validations(self):
error_message = ''
friendly_name = 'friendly_name'
uuid = '1234-5678-9012-3456'
try:
self.client.multi_party_calls.add_participant(role='agent')
except ValidationError as e:
error_message = str(e)
self.assertEqual(error_message, 'specify either multi party call friendly name or uuid')
try:
self.client.multi_party_calls.add_participant(role='supervisor', friendly_name=friendly_name, uuid=uuid)
except ValidationError as e:
error_message = str(e)
self.assertEqual(error_message, 'cannot specify both multi party call friendly name or uuid')
try:
self.client.multi_party_calls.add_participant(role='customer', uuid=uuid)
except ValidationError as e:
error_message = str(e)
self.assertEqual(error_message, 'specify either call_uuid or (from, to)')
try:
self.client.multi_party_calls.add_participant(role='customer', uuid=uuid, from_='123456', call_uuid=uuid)
except ValidationError as e:
error_message = str(e)
self.assertEqual(error_message, 'cannot specify call_uuid when (from, to) is provided')
try:
self.client.multi_party_calls.add_participant(role='agent', uuid=uuid, to_='123456')
except ValidationError as e:
error_message = str(e)
self.assertEqual(error_message, 'specify (from, to) when not adding an existing call_uuid '
'to multi party participant')
try:
self.client.multi_party_calls.add_participant(role='manager')
except ValidationError as e:
error_message = str(e)
self.assertEqual(error_message, "[\"role should be in ('agent', 'supervisor', "
"'customer') (actual value: manager)\"]")
try:
self.client.multi_party_calls.add_participant(role='supervisor', friendly_name=1234)
except ValidationError as e:
error_message = str(e)
self.assertEqual(error_message, "[\"friendly_name should be of type: ['str']\"]")
try:
self.client.multi_party_calls.add_participant(role='supervisor', uuid=1234)
except ValidationError as e:
error_message = str(e)
self.assertEqual(error_message, "[\"uuid should be of type: ['str']\"]")
try:
self.client.multi_party_calls.add_participant(role='supervisor', call_status_callback_url='callback_python')
except ValidationError as e:
error_message = str(e)
self.assertEqual(error_message, "['call_status_callback_url should match format "
"(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|"
"(?:%[0-9a-fA-F][0-9a-fA-F]))+|None) (actual value: callback_python)']")
try:
self.client.multi_party_calls.add_participant(role='supervisor', call_status_callback_method='HEAD')
except ValidationError as e:
error_message = str(e)
self.assertEqual(error_message, "[\"call_status_callback_method should be in "
"('GET', 'POST') (actual value: HEAD)\"]")
try:
self.client.multi_party_calls.add_participant(role='supervisor', confirm_key='K')
except ValidationError as e:
error_message = str(e)
self.assertEqual(error_message, "[\"confirm_key should be in ('0', '1', '2', '3', '4', '5', '6', '7', '8', "
"'9', '#', '*') (actual value: K)\"]")
try:
self.client.multi_party_calls.add_participant(role='supervisor', ring_timeout='2500')
except ValidationError as e:
error_message = str(e)
self.assertEqual(error_message, "[\"ring_timeout should be of type: ['int']\"]")
try:
self.client.multi_party_calls.add_participant(role='supervisor', max_duration=29867)
except ValidationError as e:
error_message = str(e)
self.assertEqual(error_message, "['300 < max_duration <= 28800 (actual value: 29867)']")
try:
self.client.multi_party_calls.add_participant(role='supervisor', status_callback_events='agent-transfer')
except ValidationError as e:
error_message = str(e)
self.assertEqual(error_message, "[\"status_callback_events should be among ('mpc-state-changes', "
"'participant-state-changes', 'participant-speak-events', "
"'participant-digit-input-events', 'add-participant-api-events'). multiple "
"values should be COMMA(,) separated (actual value: agent-transfer)\"]")
try:
self.client.multi_party_calls.add_participant(role='supervisor', stay_alone=1)
except ValidationError as e:
error_message = str(e)
self.assertEqual(error_message, "[\"stay_alone should be of type: ['bool']\"]")
try:
self.client.multi_party_calls.add_participant(role='supervisor', enter_sound='beep:3')
except ValidationError as e:
error_message = str(e)
self.assertEqual(error_message, "enter_sound did not satisfy any of the required types")
@with_response(200)
def test_add_participant(self):
request_body = {
'exit_sound_method': 'GET',
'exit_sound': 'beep:2',
'enter_sound_method': 'GET',
'enter_sound': 'beep:1',
'relay_dtmf_inputs': False,
'end_mpc_on_exit': False,
'start_mpc_on_enter': True,
'hold': False, 'mute': False,
'coach_mode': True,
'stay_alone': False,
'status_callback_events': 'mpc-state-changes,participant-state-changes',
'record_file_format': 'mp3',
'record': False,
'on_exit_action_method': 'POST',
'status_callback_method': 'GET',
'recording_callback_method': 'GET',
'customer_hold_music_method': 'GET',
'agent_hold_music_method': 'GET',
'wait_music_method': 'GET',
'max_participants': 10,
'max_duration': 14400,
'ring_timeout': 45,
'dial_music': 'real',
'confirm_key_sound_method': 'GET',
'call_status_callback_method': 'POST',
'call_uuid': '1234-5678-4321-0987',
'role': 'agent'
}
add_participant_response = self.client.multi_party_calls.add_participant(friendly_name='Voice', role='agent',
call_uuid='1234-5678-4321-0987')
self.__assert_requests(actual_response=add_participant_response, expected_method='POST',
expected_url='https://voice.plivo.com/v1/Account/MAXXXXXXXXXXXXXXXXXX/'
'MultiPartyCall/name_Voice/Participant/',
expected_request_body=request_body)
# update the request body for next set of params
request_body.pop('call_uuid', None)
request_body['to'] = '180012341234'
request_body['from'] = '918888888888'
request_body['role'] = 'supervisor'
request_body['coach_mode'] = False
request_body['dial_music'] = 'http://music.plivo.com/bella-ciao.wav'
request_body['status_callback_events'] = 'participant-speak-events'
request_body['ring_timeout'] = 100
request_body['max_duration'] = 25000
request_body['max_participants'] = 5
request_body['relay_dtmf_inputs'] = True
request_body['customer_hold_music_url'] = 'http://music.plivo.com/bella-ciao.wav'
request_body['customer_hold_music_method'] = 'POST'
request_body['exit_sound_method'] = 'POST'
request_body['record_file_format'] = 'wav'
add_participant_response = self.client.multi_party_calls.add_participant(
uuid='12345678-90123456', role='supervisor', to_='180012341234', from_='918888888888',
coach_mode=False, dial_music='http://music.plivo.com/bella-ciao.wav', ring_timeout=100,
status_callback_events='participant-speak-events', max_duration=25000, max_participants=5,
relay_dtmf_inputs=True, customer_hold_music_url='http://music.plivo.com/bella-ciao.wav',
customer_hold_music_method='post', exit_sound_method='Post', record_file_format='wav')
self.__assert_requests(actual_response=add_participant_response, expected_method='POST',
expected_url='https://voice.plivo.com/v1/Account/MAXXXXXXXXXXXXXXXXXX/'
'MultiPartyCall/uuid_12345678-90123456/Participant/',
expected_request_body=request_body)
@with_response(200)
def test_start_MPC(self):
request_body = {'status': 'active'}
start_mpc_response = self.client.multi_party_calls.start(friendly_name='Voice')
self.__assert_requests(actual_response=start_mpc_response, expected_method='POST',
expected_url='https://api.plivo.com/v1/Account/MAXXXXXXXXXXXXXXXXXX/'
'MultiPartyCall/name_Voice/',
expected_request_body=request_body)
start_mpc_response = self.client.multi_party_calls.start(uuid='12345678-90123456')
self.__assert_requests(actual_response=start_mpc_response, expected_method='POST',
expected_url='https://api.plivo.com/v1/Account/MAXXXXXXXXXXXXXXXXXX/'
'MultiPartyCall/uuid_12345678-90123456/',
expected_request_body=request_body)
@with_response(200)
def test_end_MPC(self):
end_mpc_response = self.client.multi_party_calls.stop(friendly_name='Voice')
self.__assert_requests(actual_response=end_mpc_response, expected_method='DELETE',
expected_url='https://voice.plivo.com/v1/Account/MAXXXXXXXXXXXXXXXXXX/'
'MultiPartyCall/name_Voice/')
end_mpc_response = self.client.multi_party_calls.stop(uuid='12345678-90123456')
self.__assert_requests(actual_response=end_mpc_response, expected_method='DELETE',
expected_url='https://voice.plivo.com/v1/Account/MAXXXXXXXXXXXXXXXXXX/'
'MultiPartyCall/uuid_12345678-90123456/')
def test_list_mpc_validations(self):
error_message = ''
try:
self.client.multi_party_calls.list(sub_account='Voice')
except ValidationError as e:
error_message = str(e)
self.assertEqual(error_message, 'sub_account did not satisfy any of the required types')
try:
self.client.multi_party_calls.list(status='terminating')
except ValidationError as e:
error_message = str(e)
self.assertEqual(error_message, "[\"status should be in ('active', 'initialized', 'ended') "
"(actual value: terminating)\"]")
try:
self.client.multi_party_calls.list(termination_cause_code='2000')
except ValidationError as e:
error_message = str(e)
self.assertEqual(error_message, "[\"termination_cause_code should be of type: ['int']\"]")
try:
self.client.multi_party_calls.list(end_time__lte='20-10-3 9:22')
except ValidationError as e:
error_message = str(e)
self.assertEqual(error_message, "['end_time__lte should match format ^\\\\d{4}-\\\\d{2}-\\\\d{2} \\\\d{2}:"
"\\\\d{2}(:\\\\d{2}(\\\\.\\\\d{1,6})?)?$ (actual value: 20-10-3 9:22)']")
try:
self.client.multi_party_calls.list(limit=300)
except ValidationError as e:
error_message = str(e)
self.assertEqual(error_message, "['0 < limit <= 20 (actual value: 300)']")
try:
self.client.multi_party_calls.list(offset=-1)
except ValidationError as e:
error_message = str(e)
self.assertEqual(error_message, "['0 <= offset (actual value: -1)']")
@with_response(200)
def test_list_MPC(self):
multi_party_calls = self.client.multi_party_calls.list()
self.__assert_requests(expected_url='https://voice.plivo.com/v1/Account/MAXXXXXXXXXXXXXXXXXX/MultiPartyCall/',
expected_method='GET')
# check we received a list response
self.assertIsInstance(multi_party_calls, ListResponseObject)
        # check if objects are cast to MultiPartyCall
self.assertIsInstance(multi_party_calls.objects[0], MultiPartyCall)
        self.assertIsInstance(multi_party_calls.objects[-1], MultiPartyCall)
# check if ID is correctly read in 5th random object
self.assertEqual(multi_party_calls.objects[5].id, "9aad6d16-ed2c-4433-9313-26f8cfc4d99c")
# check if friendly_name is correctly read in 18th random object
self.assertEqual(multi_party_calls.objects[18].friendly_name, "Gasteiz / Vitoria")
# check if termination_cause is correctly read in 13th random object
self.assertEqual(multi_party_calls.objects[13].termination_cause, "Hangup API Triggered")
# check if termination_cause_code is correctly read in 12th random object
self.assertEqual(multi_party_calls.objects[12].termination_cause_code, 2000)
# check if status is correctly read in 7th random object
self.assertEqual(multi_party_calls.objects[7].status, "Active")
# check if billed_amount is correctly read in 17th random object
self.assertEqual(multi_party_calls.objects[17].billed_amount, 0.66)
# check for case where filters are sent in request body and compare request body this time
request_body = {
'sub_account': 'SAWWWWWWWWWWWWWWWWWW',
'friendly_name': 'axa',
'status': 'active',
'termination_cause_code': 1010,
'end_time__gte': '2020-03-10 11:45',
'creation_time__lte': '2020-03-30 09:35',
'limit': 15,
'offset': 156
}
self.client.multi_party_calls.list(**request_body)
# Construct sorted GET url for both cases
expected_url = construct_get_url('https://voice.plivo.com/v1/Account/MAXXXXXXXXXXXXXXXXXX/MultiPartyCall/',
params=request_body)
actual_url = construct_get_url(self.client.current_request.url, params={})
self.assertEqual(expected_url, actual_url)
# Verifying the method used
self.assertEqual('GET', self.client.current_request.method)
def test_get_MPC(self):
response = {
'api_id': 'd0e000c6-9ace-11ea-97d8-1094bbeb5c2c',
'friendly_name': 'Chamblee',
'mpc_uuid': '9aad6d16-ed2c-4433-9313-26f8cfc4d99c',
'participants': '/v1/Account/MAXXXXXXXXXXXXXXXXXX/MultiPartyCall/name_Chamblee/Participant/',
'recording': 'not-recording',
'resource_uri': '/v1/Account/MAXXXXXXXXXXXXXXXXXX/MultiPartyCall/name_Chamblee/',
'start_time': '2020-05-18 22:02:51+05:30',
'status': 'Active',
'stay_alone': True
}
self.expected_response = response
actual_response = self.client.set_expected_response(status_code=200, data_to_return=response)
multi_party_call = self.client.multi_party_calls.get(friendly_name=response['friendly_name'])
self.__assert_requests(expected_url='https://voice.plivo.com/v1/Account/MAXXXXXXXXXXXXXXXXXX/'
'MultiPartyCall/name_{}/'.format(response['friendly_name']),
expected_method='GET', actual_response=actual_response)
        # check we received a MultiPartyCall object
        self.assertIsInstance(multi_party_call, MultiPartyCall)
# check if ID is correctly read in object
self.assertEqual(multi_party_call.id, response['mpc_uuid'])
# check if friendly_name is correctly read in object
self.assertEqual(multi_party_call.friendly_name, response['friendly_name'])
        # check if recording is correctly read in object
self.assertEqual(multi_party_call.recording, response['recording'])
        # check if stay_alone is correctly read in object
self.assertEqual(multi_party_call.stay_alone, True)
@with_response(200)
def test_update_MPC_participant(self):
participant_id = '10'
uuid = '12345678-90123456'
coach_mode = False
mute = True
update_response = self.client.multi_party_calls.update_participant(
participant_id=participant_id,
uuid=uuid,
coach_mode=coach_mode,
mute=mute
)
self.__assert_requests(expected_url='https://voice.plivo.com/v1/Account/MAXXXXXXXXXXXXXXXXXX/'
'MultiPartyCall/uuid_{}/Participant/{}/'.format(uuid, participant_id),
expected_method='POST', expected_request_body={'coach_mode': coach_mode, 'mute': mute},
actual_response=update_response)
def test_kick_MPC_participant(self):
self.client.set_expected_response(status_code=204, data_to_return=None)
participant_id = 10
uuid = '12345678-90123456'
self.client.multi_party_calls.kick_participant(
participant_id=participant_id,
uuid=uuid
)
self.__assert_requests(expected_url='https://voice.plivo.com/v1/Account/MAXXXXXXXXXXXXXXXXXX/'
'MultiPartyCall/uuid_{}/Participant/{}/'.format(uuid, participant_id),
expected_method='DELETE')
@with_response(200)
def test_start_recording(self):
file_format = 'wav'
status_callback_url = 'https://plivo.com/status'
start_recording_response = self.client.multi_party_calls.\
start_recording(friendly_name='Voice', file_format=file_format, status_callback_url=status_callback_url)
self.__assert_requests(expected_url='https://voice.plivo.com/v1/Account/MAXXXXXXXXXXXXXXXXXX/'
'MultiPartyCall/name_{}/Record/'.format('Voice'),
expected_method='POST',
expected_request_body={'file_format': 'wav',
'status_callback_url': status_callback_url,
'status_callback_method': 'POST'},
actual_response=start_recording_response)
def test_stop_recording(self):
self.client.set_expected_response(status_code=204, data_to_return=None)
self.client.multi_party_calls.stop_recording(friendly_name='Voice')
self.__assert_requests(expected_url='https://voice.plivo.com/v1/Account/MAXXXXXXXXXXXXXXXXXX/MultiPartyCall/'
'name_{}/Record/'.format('Voice'), expected_method='DELETE')
def test_pause_recording(self):
self.client.set_expected_response(status_code=204, data_to_return=None)
self.client.multi_party_calls.pause_recording(friendly_name='Voice')
self.__assert_requests(expected_url='https://voice.plivo.com/v1/Account/MAXXXXXXXXXXXXXXXXXX/MultiPartyCall/'
'name_{}/Record/Pause/'.format('Voice'), expected_method='POST')
def test_resume_recording(self):
self.client.set_expected_response(status_code=204, data_to_return=None)
self.client.multi_party_calls.resume_recording(friendly_name='Voice')
self.__assert_requests(expected_url='https://voice.plivo.com/v1/Account/MAXXXXXXXXXXXXXXXXXX/MultiPartyCall/'
'name_{}/Record/Resume/'.format('Voice'), expected_method='POST')
@with_response(200)
def test_get_participant(self):
participant_id = 49
uuid = '18905d56-79c8-41d4-a840-25feff71070e'
resp = self.client.multi_party_calls.get_participant(participant_id=participant_id, uuid=uuid)
self.__assert_requests(expected_url='https://voice.plivo.com/v1/Account/MAXXXXXXXXXXXXXXXXXX/MultiPartyCall/'
'uuid_{}/Participant/{}/'.format(uuid, participant_id),
expected_method='GET')
self.assertIsInstance(resp, MultiPartyCallParticipant)
# Verify whether SecondaryResourceID has been set properly
self.assertEqual(resp.secondary_id, str(participant_id))
# Verify whether call_uuid has been set properly
self.assertEqual(resp.call_uuid, '90de6710-9404-40d1-ba31-f26d2f7c533f')
# Verify whether role has been set properly
self.assertEqual(resp.role, 'customer')
# Verify whether start_on_enter has been set properly
self.assertEqual(resp.start_mpc_on_enter, True)
# Verify whether duration has been set properly
self.assertEqual(resp.duration, 30)
# Verify whether billed_amount has been set properly
self.assertEqual(resp.billed_amount, 0.005)
|
# pacman imports
from pacman.model.partitionable_graph.abstract_partitionable_vertex import \
AbstractPartitionableVertex
from pacman.model.abstract_classes.virtual_partitioned_vertex import \
VirtualPartitionedVertex
# general imports
from abc import ABCMeta
from six import add_metaclass
from abc import abstractmethod
@add_metaclass(ABCMeta)
class AbstractVirtualVertex(AbstractPartitionableVertex):
""" A class that allows models to define that they are virtual
"""
def __init__(self, n_atoms, spinnaker_link_id, label, max_atoms_per_core):
AbstractPartitionableVertex.__init__(
self, n_atoms, label, max_atoms_per_core)
# set up virtual data structures
self._virtual_chip_x = None
self._virtual_chip_y = None
self._real_chip_x = None
self._real_chip_y = None
self._real_link = None
self._spinnaker_link_id = spinnaker_link_id
def create_subvertex(
self, vertex_slice, resources_required, label=None,
constraints=None):
subvertex = VirtualPartitionedVertex(
resources_required, label, self._spinnaker_link_id,
constraints=constraints)
subvertex.set_virtual_chip_coordinates(
self._virtual_chip_x, self._virtual_chip_y, self._real_chip_x,
self._real_chip_y, self._real_link)
return subvertex
@property
def virtual_chip_x(self):
return self._virtual_chip_x
@property
def virtual_chip_y(self):
return self._virtual_chip_y
@property
def real_chip_x(self):
return self._real_chip_x
@property
def real_chip_y(self):
return self._real_chip_y
@property
def real_link(self):
return self._real_link
def set_virtual_chip_coordinates(
self, virtual_chip_x, virtual_chip_y, real_chip_x, real_chip_y,
real_link):
self._virtual_chip_x = virtual_chip_x
self._virtual_chip_y = virtual_chip_y
self._real_chip_x = real_chip_x
self._real_chip_y = real_chip_y
self._real_link = real_link
@property
def spinnaker_link_id(self):
""" property for returning the spinnaker link being used
:return:
"""
return self._spinnaker_link_id
@abstractmethod
def is_virtual_vertex(self):
""" helper method for is instance
:return:
"""
# overloaded method from partitionable vertex
def get_cpu_usage_for_atoms(self, vertex_slice, graph):
return 0
# overloaded method from partitionable vertex
def get_dtcm_usage_for_atoms(self, vertex_slice, graph):
return 0
# overloaded method from partitionable vertex
def get_sdram_usage_for_atoms(self, vertex_slice, graph):
return 0
|
import numpy as np
class Datasets():
    @staticmethod
    def create_np_ones_file(row, col, fn="data/nparray.txt"):
        # write a row x col matrix of ones as comma-separated text
        np.ones((row, col)).tofile(fn, sep=",")
    @staticmethod
    def create_test_file(bytes_size, fn="data/test_data.dat"):
        # write bytes_size random bytes to a binary file
        with open(fn, "wb") as o:
            o.write(np.random.bytes(bytes_size))
    # https://docs.ipfs.io/how-to/command-line-quick-start/#initialize-the-repository
    @staticmethod
    def get_example_image_hash():
        return "QmSgvgwxZGaBLqkGyWemEDqikCqU52XxsYLKtdy3vGZ8uq"
|
import unittest
from stratuslab_usecases.cli.TestUtils import sshConnectionOrTimeout
import BasicVmLifecycleTestBase
class testVmIsAccessibleViaSsh(BasicVmLifecycleTestBase.BasicVmTestBase):
vmName = 'ttylinux'
def test_usecase(self):
sshConnectionOrTimeout(self.ip_addresses[0])
def suite():
return unittest.TestLoader().loadTestsFromTestCase(testVmIsAccessibleViaSsh)
|
while True:
    tot = 0
    quantcopias = int(input("Enter the number of single-sided copies: "))
    quantcopiasverso = int(input("Enter the number of double-sided copies: "))
    print("-=" * 30)
    if quantcopias > 0:
        print("{} single-sided copies cost {} MT".format(quantcopias, quantcopias * 1.5))
        tot += quantcopias * 1.5
    if quantcopiasverso > 0:
        print("{} double-sided copies cost {} MT".format(quantcopiasverso, quantcopiasverso * 3))
        tot += quantcopiasverso * 3
    print("-=" * 30)
    print("Total is {} MT".format(tot))
    print("-=" * 30)
    while True:
        # take only the first character; [:1] avoids an IndexError on empty input
        resp = str(input("Continue? [Y/N] ")).strip().upper()[:1]
        if resp in ("Y", "N"):
            break
        print("INVALID! ", end="")
    if resp == "N":
        break
print("<<< COME BACK SOON >>>")
|
KEYDOWN = 'KEYDOWN'
KEYUP = 'KEYUP'
DIRDOWN = 'DIRDOWN'
DIRUP = 'DIRUP'
LEFTTEAM = 'LEFTTEAM'
RIGHTTEAM = 'RIGHTTEAM'
|