# _ _ _ _____ _ ______ ___ ______ _____
# | | | | | ||_ _| | | ___ \ / _ \ | ___ \_ _|
# | |_| | __ _ ___| | _| | | |__ ___| |_/ / _____ __ / /_\ \| |_/ / | |
# | _ |/ _` |/ __| |/ / | | '_ \ / _ \ ___ \/ _ \ \/ / | _ || __/ | |
# | | | | (_| | (__| <| | | | | | __/ |_/ / (_) > < | | | || | _| |_
# \_| |_/\__,_|\___|_|\_\_/ |_| |_|\___\____/ \___/_/\_\ \_| |_/\_| \___/
__title__ = 'HackTheBox API'
__description__ = 'An unofficial API library for HackTheBox <www.hackthebox.eu>'
__url__ = 'https://github.com/zachhanson94/htb'
__version__ = '0.0.1'
__build__ = 0x000001
__author__ = 'Zach Hanson'
__author_email__ = 'zachhanson94@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2020 Zach Hanson'
|
import tweepy
import json
import os
import csv
import pandas as pd
import credentials
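# The local credentials.py module is expected to define the four Twitter API
# strings used for authentication below; the values shown here are placeholder
# sketches, not real keys:
#
#     CONSUMER_KEY = "..."
#     CONSUMER_SECRET = "..."
#     ACCESS_TOKEN = "..."
#     ACCESS_TOKEN_SECRET = "..."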
ACTORS = ['@lemondefr','@courrierinter','@leJDD','@humanite_fr',
'@LEXPRESS','@libe','@LesEchos','@lobs','@MarianneleMag',
'@Le_Figaro','@AlterEco_','@_polemia','@sputnik_fr','@Europe_Israel',
'@Breizh_Info','@BVoltaire','@RTenfrancais','@InfoMdia','@Valeurs',
'@tvlofficiel','@LePoint','@F_Desouche','@Dreuz_1fo','@ndffr','@FrDesouche',
'@1RiposteLaique','@Contreinfo','@LaManifPourTous','@RNational_off',
'@EetR_National','@lenouvelliste','@letemps','@24heuresch','@20min',
'@20minutesOnline','@tdgch','@tagesanzeiger','@Blickch','@derbund',
'@LuzernerZeitung','@lecourrier','@laliberte','@heidi_news','@Lematinch',
'@BernerZeitung','@AargauerZeitung','@RTSinfo','@CdT_Online','@watson_news',
'@srfnews','@laregione','@RSInews'] # List of twitter accounts to retrieve
FILENAME = 'results/timeline.csv' # Resulting file
LANG = 'fr' # Restrict the query to a specific language ('fr', 'en'), None for all
MAX_TWEETS_PER_ACCOUNT = 4000 # Max number of tweets to retrieve per account (the standard Twitter API returns at most the ~3200 most recent tweets of a timeline)
# Create the target directory if it doesn't exist
if not os.path.exists('results'):
os.mkdir('results')
auth = tweepy.OAuthHandler(credentials.CONSUMER_KEY, credentials.CONSUMER_SECRET)
auth.set_access_token(credentials.ACCESS_TOKEN, credentials.ACCESS_TOKEN_SECRET)
api = tweepy.API(auth, wait_on_rate_limit=True)
header = ['tweet_id', 'user_id', 'user_name', 'followers', 'following', 'likes',
'retweets', 'date', 'reply_to_tweet_id', 'reply_to_user_id', 'reply_to_username',
'user_mentions_ids', 'user_mentions_names', 'text', 'retweet_from_user_id',
'retweet_from_username', 'retweet_from_tweet_id', 'urls']
def saveTweet(row, filename):
with open(filename, 'a') as f:
writer = csv.writer(f)
writer.writerow(row)
for actor in ACTORS:
min_id = None
if os.path.exists(FILENAME):
df = pd.read_csv(FILENAME)
df = df[df.user_name == actor[1:]]
df.date = pd.to_datetime(df.date)
ids = df.tweet_id.values
counter = len(ids)
if counter:
            min_id = int(df.loc[df.date.idxmax()].tweet_id)  # id of the newest stored tweet, passed as since_id below so only newer tweets are fetched
else:
with open(FILENAME, 'w') as f:
writer = csv.writer(f)
writer.writerow(header)
counter = 0
print(min_id)
    for tweets in tweepy.Cursor(api.user_timeline, since_id=min_id, screen_name=actor, tweet_mode="extended", lang=LANG, count=100).pages(MAX_TWEETS_PER_ACCOUNT // 100):
for tweet in tweets:
tweet_id = tweet.id_str
user_id = tweet.user.id_str
user_name = tweet.user.screen_name
followers = tweet.user.followers_count
following = tweet.user.friends_count
likes = tweet.favorite_count
retweets = tweet.retweet_count
date = tweet.created_at
reply_to_tweet_id = tweet.in_reply_to_status_id_str
reply_to_user_id = tweet.in_reply_to_user_id_str
reply_to_username = tweet.in_reply_to_screen_name
user_mentions_ids = [mention['id_str'] for mention in tweet.entities['user_mentions']]
user_mentions_names = [mention['screen_name'] for mention in tweet.entities['user_mentions']]
urls=[]
for url in tweet.entities['urls']:
urls.append(url['expanded_url'])
try:
text = tweet.extended_tweet["full_text"]
except AttributeError:
text = tweet.full_text
retweet_from_user_id = None
retweet_from_username = None
retweet_from_tweet_id = None
if hasattr(tweet, "retweeted_status"):
retweet_from_user_id = tweet.retweeted_status.user.id_str
retweet_from_username = tweet.retweeted_status.user.screen_name
retweet_from_tweet_id = tweet.retweeted_status.id_str
row = [tweet_id, user_id, user_name, followers, following, likes, retweets, date, reply_to_tweet_id, reply_to_user_id, reply_to_username, user_mentions_ids, user_mentions_names, text, retweet_from_user_id, retweet_from_username, retweet_from_tweet_id, urls]
saveTweet(row, FILENAME)
counter += 1
print(f"{actor} -> Total fetched : {counter}\r", end="")
print(actor, " -> Total fetched : " + str(counter))
|
# # FROM STEPHANIE BACK IN THE DAY...
# 1. Calculate the saturated vapor pressure (ew) as ew = 6.112* exp(17.62*T/(243.12+T) with T in [°C] and ew in [hPa] from CRU temperature at 0.5 x 0.5 degree resolution*
# 2. Calculate %RH at 0.5 x 0.5 degree resolution as e/ew where e is vapor pressure in hPa from CRU
# 3. Replace %RH >100 with 95 (see my last email for why this happens)
# 4. Interpolate %RH to 1km
# 5. Calculate vapor pressure at 1km resolution from interpolated RH and 1km temperature
def convert_to_hur( tas_arr, vap_arr ):
esa_arr = 6.112 * np.exp( 17.62 * tas_arr/ (243.12 + tas_arr) )
# esa_arr = 6.112 * np.exp( 22.46 * tas_arr / (272.62 + tas_arr) )
hur_arr = vap_arr/esa_arr * 100
return hur_arr
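# A quick numeric sanity check of the Magnus-form conversion above (illustrative
# values only, not taken from the CRU data): at T = 20 degC the saturation vapor
# pressure is roughly 23.3 hPa, so a vapor pressure of 11.7 hPa corresponds to
# about 50% relative humidity.
#
#     >>> convert_to_hur( np.array([20.0]), np.array([11.7]) )
#     array([ 50.1...])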
if __name__ == '__main__':
# convert the vap/tas to hur for the GD CRU data
import xarray as xr
import numpy as np
# filenames
vap_fn = '/Data/Base_Data/Climate/World/CRU_grids/CRU_TS323/cru_ts3.23.1901.2014.vap.dat.nc'
tas_fn = '/Data/Base_Data/Climate/World/CRU_grids/CRU_TS323/cru_ts3.23.1901.2014.tmp.dat.nc'
# open the cru data
vap = xr.open_dataset( vap_fn )
tas = xr.open_dataset( tas_fn )
# slice them to the variable we want and return the 3D array
v = vap.vap.data
t = tas.tmp.data
# # FROM STEPHANIE BACK IN THE DAY...
# 1. Calculate the saturated vapor pressure (ew) as ew = 6.112* exp(17.62*T/(243.12+T) with T in [°C] and ew in [hPa] from CRU temperature at 0.5 x 0.5 degree resolution*
# 2. Calculate %RH at 0.5 x 0.5 degree resolution as e/ew where e is vapor pressure in hPa from CRU
h = convert_to_hur( t, v )
# 3. Replace %RH >100 with 95 (see my last email for why this happens)
h[ (~np.isnan(h)) & (h < 0) ] = 0
h[ (~np.isnan(h)) & (h > 100) ] = 95
# write this to disk:
hur = vap.vap.copy()
# update the DataArray attributes since we updated the data to a new variable
hur.attrs.update( long_name='relative humidity',
units='pct', derived_by='SNAP - 12/8/2016',
derived_author='Michael Lindgren (malindgren@alaska.edu)' )
# convert to an xarray dataset
hur = hur.to_dataset( name='hur' )
# put the data from above into the object.
hur[ 'hur' ] = (('time', 'lat', 'lon' ), h )
# update the Dataset attributes
    hur.attrs.update( COMMENTS='Variable computed by Michael Lindgren at SNAP 12/8/2016 using cru tmp and cru vap', equation_step1='esa_arr = 6.112 * np.exp( 17.62 * tas_arr / (243.12 + tas_arr) )', equation_step2='hur_arr = vap_arr/esa_arr * 100', post_process_step='values < 0 were set to 0. values > 100 were set to 95 [per Stephanie McAfee suggestion]' )
# write to disk
output_filename = vap_fn.replace( 'vap', 'hur' ).replace( '.nc', '_snap_conversion.nc' )
hur.to_netcdf( path=output_filename, mode='w' )
# 4. Interpolate %RH to 1km
# ---> RUN DOWNSCALE
# 5. Calculate vapor pressure at 1km resolution from interpolated RH and 1km temperature
|
#!/usr/bin/env python
import boto3
import os
from datacoco_cloud import UNIT_TEST_KEY
import logging
class S3toS3Interaction(object):
"""
Class to simplify S3 to S3 Interactions using boto3
"""
def __init__(
self,
source_aws_key: str,
source_aws_secret: str,
target_aws_key: str,
target_aws_secret: str,
source_aws_region: str = "us-east-1",
target_aws_region: str = "us-east-1",
):
##########
### Setup configuration
##########
self.is_test = os.environ.get(UNIT_TEST_KEY, False)
self.source_aws_key = source_aws_key
self.source_aws_secret = source_aws_secret
self.source_aws_region = source_aws_region
self.target_aws_key = target_aws_key
self.target_aws_secret = target_aws_secret
self.target_aws_region = target_aws_region
### Setting up the S3 Clients
if not self.is_test:
self.s3_client_source = boto3.client(
"s3",
region_name=self.source_aws_region,
aws_access_key_id=self.source_aws_key,
aws_secret_access_key=self.source_aws_secret,
)
self.s3_client_target = boto3.client(
"s3",
region_name=self.target_aws_region,
aws_access_key_id=self.target_aws_key,
aws_secret_access_key=self.target_aws_secret,
)
def duplicate_objects(
self,
source_bucket: str,
target_bucket: str,
source_bucket_prefix: str,
target_path: str,
source_bucket_suffix: str = "",
):
self.__do_transfer(
source_bucket=source_bucket,
target_bucket=target_bucket,
source_bucket_prefix=source_bucket_prefix,
target_path=target_path,
source_bucket_suffix=source_bucket_suffix,
isMove=False,
)
def move_objects(
self,
source_bucket: str,
target_bucket: str,
source_bucket_prefix: str,
target_path: str,
source_bucket_suffix: str = "",
):
self.__do_transfer(
source_bucket=source_bucket,
target_bucket=target_bucket,
source_bucket_prefix=source_bucket_prefix,
target_path=target_path,
source_bucket_suffix=source_bucket_suffix,
isMove=True,
)
def __do_transfer(
self,
source_bucket: str,
target_bucket: str,
source_bucket_prefix: str,
target_path: str,
source_bucket_suffix: str,
isMove: bool = False,
):
# String for Printing Operations
operation = "copy"
if isMove:
operation = "move"
try:
payload = self.s3_client_source.list_objects_v2(
Bucket=source_bucket, Prefix=source_bucket_prefix
)
if payload["KeyCount"] == 0:
logging.info(f"No files to {operation}.")
else:
keyCount = 0
for item in payload["Contents"]:
filepath = item["Key"]
# Checks first if file matches suffix
if filepath.endswith(source_bucket_suffix):
# Increase Key Count per matched suffix
keyCount += 1
if len(filepath.split("/")) > 1:
deductLength = len(filepath.split("/")[0]) + 1
else:
deductLength = 0
filename = filepath[deductLength:]
logging.info(f"filename: {filename}")
                        if filename != "":
logging.info(
f"Sending file {source_bucket}/{filepath} to {target_bucket}/{target_path}/{filename}"
)
logging.info(
f"filename to {operation}: {filename}"
)
copy_source = {
"Bucket": source_bucket,
"Key": filepath,
}
if not self.is_test:
copy_response = self.s3_client_target.copy_object(
CopySource=copy_source,
Bucket=target_bucket,
Key=f"{target_path}/{filename}",
)
logging.info(copy_response)
if (
copy_response["ResponseMetadata"][
"HTTPStatusCode"
]
!= 200
):
logging.error(
f"Failed to {operation}: {fileName}"
)
if isMove:
delete_response = self.s3_client_source.delete_object(
Bucket=source_bucket, Key=filepath
)
logging.info(delete_response)
if (
delete_response["ResponseMetadata"][
"HTTPStatusCode"
]
!= 200
):
logging.error(
f"Failed to delete: {fileName}"
)
if keyCount == 0:
logging.info(f"No files to {operation}.")
except Exception as e:
logging.error(e)
raise e
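# Example usage (a minimal sketch -- the bucket names, prefix, and credential
# placeholders below are illustrative assumptions, not values from this module):
#
#     s3 = S3toS3Interaction(
#         source_aws_key="SOURCE_KEY",
#         source_aws_secret="SOURCE_SECRET",
#         target_aws_key="TARGET_KEY",
#         target_aws_secret="TARGET_SECRET",
#     )
#     # copy every ".csv" object under "incoming/" into the archive bucket
#     s3.duplicate_objects(
#         source_bucket="my-source-bucket",
#         target_bucket="my-archive-bucket",
#         source_bucket_prefix="incoming/",
#         target_path="archive",
#         source_bucket_suffix=".csv",
#     )
#     # move_objects() takes the same arguments and deletes the source copies afterwards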
|
import sys, os
import math
import numpy
import scipy
from pylab import *
from matplotlib import *
from scipy.stats import *
from numpy import *
from scipy import *
import kepfit
import kepmsg
"""
This code is based on the PyKE routine kepsff
found at keplerscience.arc.nasa.gov
The kepsff code is based on Vanderburg and Johnson 2014.
If you use this you must cite V&J 2014.
"""
def martinsff(intime,indata,centr1,centr2,
npoly_cxcy,sigma_cxcy,npoly_ardx,
npoly_dsdt,sigma_dsdt,npoly_arfl,sigma_arfl,verbose,logfile,
status):
# startup parameters
status = 0
labelsize = 16
ticksize = 14
xsize = 20
ysize = 8
lcolor = '#0000ff'
lwidth = 1.0
fcolor = '#ffff00'
falpha = 0.2
seterr(all="ignore")
# fit centroid data with low-order polynomial
cfit = zeros((len(centr2)))
csig = zeros((len(centr2)))
functype = 'poly' + str(npoly_cxcy)
pinit = array([nanmean(centr2)])
if npoly_cxcy > 0:
for j in range(npoly_cxcy):
pinit = append(pinit,0.0)
try:
coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
kepfit.lsqclip(functype,pinit,centr1,centr2,None,sigma_cxcy,sigma_cxcy,10,logfile,verbose)
for j in range(len(coeffs)):
cfit += coeffs[j] * numpy.power(centr1,j)
csig[:] = sigma
except:
        message = 'ERROR -- KEPSFF: could not fit centroid data with polynomial. There are no data points within the range of input rows. Either increase the stepsize (with an appreciation of the effects on light curve quality this will have!), or better yet - cut the timeseries up to remove large gaps in the input light curve using kepclip.'
status = kepmsg.err(logfile,message,verbose)
# sys.exit('')
os._exit(1)
# reject outliers
time_good = array([],'float64')
centr1_good = array([],'float32')
centr2_good = array([],'float32')
flux_good = array([],'float32')
cad_good = array([],'int')
for i in range(len(cfit)):
if abs(centr2[i] - cfit[i]) < sigma_cxcy * csig[i]:
time_good = append(time_good,intime[i])
centr1_good = append(centr1_good,centr1[i])
centr2_good = append(centr2_good,centr2[i])
flux_good = append(flux_good,indata[i])
# covariance matrix for centroid time series
centr = concatenate([[centr1_good] - mean(centr1_good), [centr2_good] - mean(centr2_good)])
covar = cov(centr)
# eigenvector eigenvalues of covariance matrix
[eval, evec] = numpy.linalg.eigh(covar)
ex = arange(-10.0,10.0,0.1)
epar = evec[1,1] / evec[0,1] * ex
enor = evec[1,0] / evec[0,0] * ex
ex = ex + mean(centr1)
epar = epar + mean(centr2_good)
enor = enor + mean(centr2_good)
# rotate centroid data
centr_rot = dot(evec.T,centr)
# fit polynomial to rotated centroids
rfit = zeros((len(centr2)))
rsig = zeros((len(centr2)))
functype = 'poly' + str(npoly_ardx)
pinit = array([nanmean(centr_rot[0,:])])
pinit = array([1.0])
if npoly_ardx > 0:
for j in range(npoly_ardx):
pinit = append(pinit,0.0)
try:
coeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
kepfit.lsqclip(functype,pinit,centr_rot[1,:],centr_rot[0,:],None,100.0,100.0,1,
logfile,verbose)
except:
message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial'
status = kepmsg.err(logfile,message,verbose)
rx = linspace(nanmin(centr_rot[1,:]),nanmax(centr_rot[1,:]),100)
ry = zeros((len(rx)))
for i in range(len(coeffs)):
ry = ry + coeffs[i] * numpy.power(rx,i)
# calculate arclength of centroids
s = zeros((len(rx)))
for i in range(1,len(s)):
work3 = ((ry[i] - ry[i-1]) / (rx[i] - rx[i-1]))**2
s[i] = s[i-1] + math.sqrt(1.0 + work3) * (rx[i] - rx[i-1])
# fit arclength as a function of strongest eigenvector
sfit = zeros((len(centr2)))
ssig = zeros((len(centr2)))
functype = 'poly' + str(npoly_ardx)
pinit = array([nanmean(s)])
if npoly_ardx > 0:
for j in range(npoly_ardx):
pinit = append(pinit,0.0)
try:
acoeffs, errors, covar, iiter, sigma, chi2, dof, fit, plotx, ploty, status = \
kepfit.lsqclip(functype,pinit,rx,s,None,100.0,100.0,100,logfile,verbose)
except:
message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial'
status = kepmsg.err(logfile,message,verbose)
# correlate arclength with detrended flux
t = copy(time_good)
y = copy(flux_good)
z = centr_rot[1,:]
x = zeros((len(z)))
for i in range(len(acoeffs)):
x = x + acoeffs[i] * numpy.power(z,i)
# calculate time derivative of arclength s
dx = zeros((len(x)))
for i in range(1,len(x)):
dx[i] = (x[i] - x[i-1]) / (t[i] - t[i-1])
dx[0] = dx[1]
# fit polynomial to derivative and flag outliers (thruster firings)
dfit = zeros((len(dx)))
dsig = zeros((len(dx)))
functype = 'poly' + str(npoly_dsdt)
pinit = array([nanmean(dx)])
if npoly_dsdt > 0:
for j in range(npoly_dsdt):
pinit = append(pinit,0.0)
try:
dcoeffs, errors, covar, iiter, dsigma, chi2, dof, fit, dumx, dumy, status = \
kepfit.lsqclip(functype,pinit,t,dx,None,3.0,3.0,10,logfile,verbose)
except:
message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial'
status = kepmsg.err(logfile,message,verbose)
for i in range(len(dcoeffs)):
dfit = dfit + dcoeffs[i] * numpy.power(t,i)
centr1_pnt = array([],'float32')
centr2_pnt = array([],'float32')
time_pnt = array([],'float64')
flux_pnt = array([],'float32')
dx_pnt = array([],'float32')
s_pnt = array([],'float32')
time_thr = array([],'float64')
flux_thr = array([],'float32')
dx_thr = array([],'float32')
thr_cadence = zeros(len(t),dtype=bool)
for i in range(len(t)):
if dx[i] < dfit[i] + sigma_dsdt * dsigma and dx[i] > dfit[i] - sigma_dsdt * dsigma:
time_pnt = append(time_pnt,time_good[i])
flux_pnt = append(flux_pnt,flux_good[i])
dx_pnt = append(dx_pnt,dx[i])
s_pnt = append(s_pnt,x[i])
centr1_pnt = append(centr1_pnt,centr1_good[i])
centr2_pnt = append(centr2_pnt,centr2_good[i])
else:
time_thr = append(time_thr,time_good[i])
flux_thr = append(flux_thr,flux_good[i])
dx_thr = append(dx_thr,dx[i])
thr_cadence[i] = True
# fit arclength-flux correlation
cfit = zeros((len(time_pnt)))
csig = zeros((len(time_pnt)))
functype = 'poly' + str(npoly_arfl)
pinit = array([nanmean(flux_pnt)])
if npoly_arfl > 0:
for j in range(npoly_arfl):
pinit = append(pinit,0.0)
try:
ccoeffs, errors, covar, iiter, sigma, chi2, dof, fit, plx, ply, status = \
kepfit.lsqclip(functype,pinit,s_pnt,flux_pnt,None,sigma_arfl,sigma_arfl,100,logfile,verbose)
except:
message = 'ERROR -- KEPSFF: could not fit rotated centroid data with polynomial'
status = kepmsg.err(logfile,message,verbose)
# correction factors for unfiltered data
centr = concatenate([[centr1] - mean(centr1_good), [centr2] - mean(centr2_good)])
centr_rot = dot(evec.T,centr)
yy = copy(indata)
zz = centr_rot[1,:]
xx = zeros((len(zz)))
cfac = zeros((len(zz)))
for i in range(len(acoeffs)):
xx = xx + acoeffs[i] * numpy.power(zz,i)
for i in range(len(ccoeffs)):
cfac = cfac + ccoeffs[i] * numpy.power(xx,i)
# apply correction to flux time-series
out_detsap = indata / cfac
return out_detsap, cfac, thr_cadence
|
def theme_alberta():
"""
    Applies a University of Alberta theme to all subsequent altair plot objects so they are displayed with the U of A visual identity.
See the visual identity at https://www.ualberta.ca/toolkit/visual-identity/our-colours.
Four palettes based on the U of A visual identity guidelines can be selected: 'alpha', 'beta', 'gamma' and 'delta'.
See more details about the package on GitHub: https://github.com/UBC-MDS/hueniversitypy/blob/master/README.md
Returns
-------
altair plot : altair.vegalite.v4.api.Chart
an altair plot with the U of A visual identity colour theme applied.
Example
----------
>>> from hueniversitypy.theme_alberta import *
>>> data = pandas.DataFrame({'X': numpy.random.randint(100, size=100),
'Y': numpy.random.randint(100, size=100),
'Cat': [['A', 'B', 'C'][numpy.random.randint(3, size=1)[0]] for i in range(100)]})
>>> scatterplot = (altair.Chart(data).mark_circle(size=60, opacity=0.5).encode(x='X', y='Y', color='Cat'))
>>> altair.themes.register("theme_alberta", theme_alberta)
>>> altair.themes.enable("theme_alberta")
>>> scatterplot
"""
# Code attribution: Sergio Sanchez
# https://towardsdatascience.com/consistently-beautiful-visualizations-with-altair-themes-c7f9f889602
# University font
font = "Arial"
labelFont = "Arial"
# Specify colour palette for Alberta
alberta_palette = ["#007C41", "#FFDB05", "#7D9AAA", "#A8B400", "#A79E70"]
return {
"config": {
# Title font and size
"title": {
"fontSize" : 18,
"font": font,
"anchor": "start",
"fontColor": "#000000"
} ,
# Axes font and sizes
"axisX": {
"labelFont": labelFont,
"labelFontSize": 12,
"titleFont": font,
"titleFontSize": 12,
"title": "X Axis Title (units)"
},
"axisY": {
"labelFont": labelFont,
"labelFontSize": 12,
"titleFont": font,
"titleFontSize": 12,
"title": "Y Axis Title (units)"
},
# Add colour palette
"range": {
"category": alberta_palette
}
}
}
|
"""Setuptools configuration for Grd2Shp_Xagg."""
from os import path
from setuptools import find_packages
from setuptools import setup
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst") as history_file:
history = history_file.read()
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "requirements.txt")) as requirements_file:
# Parse requirements.txt, ignoring any commented-out lines.
requirements = [
line
for line in requirements_file.read().splitlines()
if not line.startswith("#")
]
setup_requirements = [
"pytest-runner",
]
test_requirements = [
"pytest>=3",
]
setup(
author="rmcd@usgs.gov",
author_email="rmcd@usgs.gov",
python_requires=">=3.8",
classifiers=[
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
description="Interpolate gridded input to geometry polygons",
entry_points={
"console_scripts": [
"grd2shp_xagg=grd2shp_xagg.__main__:main",
],
},
install_requires=requirements,
license="MIT",
long_description=readme + "\n\n" + history,
include_package_data=True,
keywords="grd2shp_xagg,grd2shp_xagg",
name="grd2shp_xagg",
packages=find_packages(where="src"),
package_dir={"": "src"},
setup_requires=setup_requirements,
test_suite="tests",
tests_require=test_requirements,
url="https://github.com/rmcd-mscb/grd2shp_xagg",
version="0.0.1-dev0",
zip_safe=False,
)
|
"""Library of aospy.Proj objects that I use."""
from .aero_3agcm import aero_3agcm
# from .burls import burls
# from .cmip5 import cmip5
from .gcm_input import gcm_input
# from .obs import obs
|
from flask_jwt import jwt_required
from flask_restful import Resource, reqparse
from ..models import JobModel, MeasurementModel, MetricModel
class Measurement(Resource):
parser = reqparse.RequestParser()
parser.add_argument(
"value",
type=float,
required=True,
help="This field cannot be left blank.",
)
parser.add_argument(
"unit",
type=str,
required=True,
help="This field cannot be left blank.",
)
parser.add_argument(
"metric",
type=str,
required=True,
help="You must provide a metric name associated "
"to the measurement.",
)
def get(self, job_id):
"""
Retrieve all measurements performed by a verification job.
---
tags:
- Metric Measurements
parameters:
- name: job_id
in: path
type: integer
description: ID of the job.
required: true
responses:
200:
description: List of Measurements successfully retrieved.
404:
description: Job not found.
"""
# find the corresponding job
job = JobModel.find_by_id(job_id)
if job:
# find the associated measurements
measurements = MeasurementModel.find_by_job_id(job.id)
return {
"measurements": [
measurement.json() for measurement in measurements
]
}
else:
message = "Job `{}` not found.".format(job_id)
return {"message": message}, 404
@jwt_required()
def post(self, job_id):
"""
Create a new measurement for an existing job.
---
tags:
- Metric Measurements
parameters:
- name: job_id
in: path
type: integer
description: ID of the job.
required: true
- in: body
name: "Request body:"
schema:
type: object
required:
- metric
- value
- unit
properties:
metric:
type: string
value:
type: number
unit:
type: string
responses:
201:
description: Measurement successfully created.
401:
description: >
Authorization Required. Request does not contain a
valid access token.
404:
description: Job or associated metric not found.
500:
description: An error occurred inserting the measurement.
"""
data = Measurement.parser.parse_args()
# find the corresponding job
job = JobModel.find_by_id(job_id)
if job:
metric_name = data["metric"]
# find the associated metric
metric = MetricModel.find_by_name(metric_name)
else:
message = "Job `{}` not found.".format(job_id)
return {"message": message}, 404
if metric:
measurement = MeasurementModel(job.id, metric.id, **data)
else:
message = "Metric `{}` not found.".format(metric_name)
return {"message": message}, 404
try:
measurement.save_to_db()
except Exception:
return {
"message": "An error occurred inserting the " "measurement."
}, 500
return measurement.json(), 201
class MeasurementList(Resource):
def get(self):
"""
Retrieve the complete list of measurements.
---
tags:
- Metric Measurements
responses:
200:
description: List of Measurements successfully retrieved.
"""
return {
"measurements": [
measurement.json()
for measurement in MeasurementModel.query.all()
]
}
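# Example client call (a sketch only -- these Resources are mounted on an Api
# object outside this module, so the "/measurement/<job_id>" route and host
# below are assumptions, as are the metric name and unit):
#
#     import requests
#     r = requests.post(
#         "http://localhost:5000/measurement/1",
#         json={"metric": "AM1", "value": 2.5, "unit": "mag"},
#         headers={"Authorization": "JWT <access_token>"},
#     )
#     assert r.status_code == 201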
|
_root.createTextField("mytext",1,100,100,300,100);
|
from wand.image import Image
import io
import os
import tempfile
import math
from PyPDF2 import PdfFileWriter, PdfFileReader
from mmdet.apis import init_detector, inference_detector, show_result_pyplot
class ExtractTable(object):
"""
Utilizes CascadeTabNet (https://github.com/DevashishPrasad/CascadeTabNet) to detect table boundaries
    These table boundaries are then used by PDF table extraction libraries to extract tables from
specific regions that they would've otherwise missed.
"""
def __init__(self, pdf_fpath, pagenums=None, resolution=200,
threshold=0.85, model_device='cpu', stretch_bounds=0.1,
origin='top-left'):
self.pdf_fpath = pdf_fpath
self.pagenums = pagenums
self.resolution = resolution
self.threshold = threshold
self.model_device = model_device
self.stretch_bounds = stretch_bounds
self.origin = origin
self.load_model()
def load_model(self):
self._config_fpath = os.environ['TABNET_CONFIGPATH']
self._model_fpath = os.environ['TABNET_MODELPATH']
self._model = init_detector(self._config_fpath, self._model_fpath, device=self.model_device)
def convert_pdf2imgs(self, dirname):
inputpdf = PdfFileReader(open(self.pdf_fpath, 'rb'))
pages = list(range(inputpdf.numPages)) if self.pagenums is None else self.pagenums
imgpaths = {}
for pagenum in pages:
fname = os.path.join(dirname, f'pdfimg-{pagenum}.jpeg')
dst_pdf = PdfFileWriter()
dst_pdf.addPage(inputpdf.getPage(pagenum))
pdf_bytes = io.BytesIO()
dst_pdf.write(pdf_bytes)
pdf_bytes.seek(0)
img = Image(file=pdf_bytes, resolution=self.resolution)
            img = img.convert('jpeg')  # Image.convert() returns a new image rather than converting in place
img.save(filename=fname)
imgsize = img.size
imgpaths[pagenum] = {'fpath': fname, 'shape': imgsize}
return imgpaths
def get_page_props(self, pagenum):
inputpdf = PdfFileReader(open(self.pdf_fpath, 'rb'))
dims = inputpdf.getPage(pagenum).mediaBox
_, _, width, height = dims
return float(width), float(height)
def unnormalize_boundaries(self, boundaries, width, height):
coords = []
for boundary in boundaries:
x1, y1, x2, y2 = boundary
x1 = math.floor(x1 * width)
y1 = math.floor(y1 * height)
x2 = math.ceil(x2 * width)
y2 = math.ceil(y2 * height)
coords.append((x1, y1, x2, y2))
return coords
def correct_for_origin(self, coords, width, height):
"""
Computed coordinates from the model assumes "top-left"
as the origin. Some libraries though define "bottom-left"
as the origin, and therefore, this method corrects the
computed coordinates
"""
result = []
for coordinate in coords:
x1, y1, x2, y2 = coordinate
if self.origin == 'top-left':
pass # do nothing since the model has the same origin
elif self.origin == 'bottom-left':
y1 = height - y1
y2 = height - y2
elif self.origin == 'top-right':
x1 = width - x1
x2 = width - x2
elif self.origin == 'bottom-right':
x1 = width - x1
x2 = width - x2
y1 = height - y1
y2 = height - y2
else:
raise AttributeError('origin can only be [top-left, top-right, bottom-left, bottom-right]')
result.append((x1, y1, x2, y2))
return result
def get_table_boundaries(self, fpath, imgsize):
def get_table_coords(tblarray, width, height):
tables = []
for tbl in tblarray:
x1, y1, x2, y2, conf = tbl
if conf < self.threshold:
continue
# normalize coords
x1 = x1 / float(width)
y1 = y1 / float(height)
x2 = x2 / float(width)
y2 = y2 / float(height)
tables.append((x1, y1, x2, y2))
return tables
result = inference_detector(self._model, fpath)
width, height = imgsize
tables = []
tables.extend(get_table_coords(result[0][0], width, height)) # Get bordered tables
tables.extend(get_table_coords(result[0][2], width, height)) # Get unbordered tables
return tables
def stretch_boundaries(self, boundaries, width, height):
result = []
for boundary in boundaries:
x1, y1, x2, y2 = boundary
box_w = x2 - x1
box_h = y2 - y1
x1 = max(0, x1 - math.floor(self.stretch_bounds * box_w))
y1 = max(0, y1 - math.floor(self.stretch_bounds * box_h))
x2 = min(width, math.ceil(x2 + self.stretch_bounds * box_w))
y2 = min(height, math.ceil(y2 + self.stretch_bounds * box_h))
result.append((x1, y1, x2, y2))
return result
def extract(self):
tables = {} # pagenum -> table coordinates dictionary
with tempfile.TemporaryDirectory() as tmpdirname:
imgpaths = self.convert_pdf2imgs(tmpdirname)
for pagenum in sorted(imgpaths.keys()):
val = imgpaths[pagenum]
fpath = val['fpath']
imgshape = val['shape']
table_boundaries = self.get_table_boundaries(fpath, imgshape) # normalized boundaries
width, height = self.get_page_props(pagenum) # shape of PDF
table_boundaries = self.unnormalize_boundaries(table_boundaries, width, height) # boundaries in the PDF space
table_boundaries = self.stretch_boundaries(table_boundaries, width, height)
table_boundaries = self.correct_for_origin(table_boundaries, width, height)
tables[pagenum] = table_boundaries
return tables
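# Example usage (a minimal sketch; the environment variable values and the PDF
# path are illustrative assumptions -- TABNET_CONFIGPATH and TABNET_MODELPATH
# must point at a CascadeTabNet config file and checkpoint for load_model()):
#
#     os.environ['TABNET_CONFIGPATH'] = '/path/to/cascade_mask_rcnn_config.py'
#     os.environ['TABNET_MODELPATH'] = '/path/to/cascadetabnet_checkpoint.pth'
#     extractor = ExtractTable('/path/to/document.pdf', origin='bottom-left')
#     tables = extractor.extract()   # {pagenum: [(x1, y1, x2, y2), ...]}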
|
"""
Light-weighted Simulation Engine
"""
from collections import deque
import heapq
import copy
import time
import datetime
from evsim.definition import *
from evsim.default_message_catcher import *
from evsim.behavior_model import *
from evsim.system_object import *
class SysExecutor(SysObject, BehaviorModel):
EXTERNAL_SRC = "SRC"
EXTERNAL_DST = "DST"
def __init__(self, _time_step, _sim_name='default', _sim_mode='VIRTUAL_TIME'):
BehaviorModel.__init__(self, _sim_name)
self.global_time = 0
self.target_time = 0
self.time_step = _time_step # time_step may changed? - cbchoi
# dictionary for waiting simulation objects
self.waiting_obj_map = {}
# dictionary for active simulation objects
self.active_obj_map = {}
# dictionary for object to ports
self.port_map = {}
# added by cbchoi 2020.01.20
self.hierarchical_structure = {}
self.min_schedule_item = deque()
self.sim_init_time = datetime.datetime.now()
# self.eval_time = 0
self.dmc = DefaultMessageCatcher(0, Infinite, "dc", "default")
self.register_entity(self.dmc)
self.simulation_mode = SimulationMode.SIMULATION_IDLE
# External Interface
self.input_event_queue = []
self.output_event_queue = deque()
# TIME Handling
self.sim_mode = _sim_mode
# Learning Module
self.learn_module = None
# retrieve global time
def get_global_time(self):
return self.global_time
def register_entity(self, sim_obj):
#print((sim_obj,))
if not sim_obj.get_create_time() in self.waiting_obj_map:
self.waiting_obj_map[sim_obj.get_create_time()] = list()
self.waiting_obj_map[sim_obj.get_create_time()].append(sim_obj)
def create_entity(self):
if len(self.waiting_obj_map.keys()) != 0:
key = min(self.waiting_obj_map)
if key <= self.global_time:
lst = self.waiting_obj_map[key]
for obj in lst:
# print("global:",self.global_time," create agent:", obj.get_obj_name())
self.active_obj_map[obj.get_obj_id()] = obj
# self.min_schedule_item.append((obj.time_advance() + self.global_time, obj))
obj.set_req_time(self.global_time)
self.min_schedule_item.append(obj)
del self.waiting_obj_map[key]
# select object that requested minimum time
self.min_schedule_item = deque(sorted(self.min_schedule_item, key=lambda bm: bm.get_req_time()))
def destroy_entity(self):
if len(self.active_obj_map.keys()) != 0:
delete_lst = []
for agent_name, agent in self.active_obj_map.items():
if agent.get_destruct_time() <= self.global_time:
delete_lst.append(agent)
for agent in delete_lst:
#print("global:",self.global_time," del agent:", agent.get_name())
del(self.active_obj_map[agent.get_obj_id()])
port_del_lst = []
for key, value in self.port_map.items():
if value[0][0] is agent:
port_del_lst.append(key)
for key in port_del_lst:
del(self.port_map[key])
self.min_schedule_item.remove(agent)
def coupling_relation(self, src_obj, out_port, dst_obj, in_port):
if (src_obj, out_port) in self.port_map:
self.port_map[(src_obj, out_port)].append((dst_obj, in_port))
else:
self.port_map[(src_obj, out_port)] = [(dst_obj, in_port)]
# self.port_map_wName.append((src_obj.get_name(), out_port, dst_obj.get_name(), in_port))
def _coupling_relation(self, src, dst):
if src in self.port_map:
self.port_map[src].append(dst)
else:
self.port_map[src] = [dst]
# self.port_map_wName.append((src_obj.get_name(), out_port, dst_obj.get_name(), in_port))
'''
def update_coupling_relation(self):
self.port_map.clear()
for i in range(len(self.port_map_wName)):
src_obj_name = self.port_map_wName[i][0]
src_obj = None
# find loaded obj with name
for q in range(len(self.min_schedule_item)):
if self.min_schedule_item[q].get_name() == src_obj_name:
src_obj = self.min_schedule_item[q]
out_port = self.port_map_wName[i][1]
dst_obj_name = self.port_map_wName[i][2]
dst_obj = None
for q in range(len(self.min_schedule_item)):
if self.min_schedule_item[q].get_name() == dst_obj_name:
dst_obj = self.min_schedule_item[q]
in_port = self.port_map_wName[i][3]
self.port_map[(src_obj, out_port)] = (dst_obj, in_port)
'''
def output_handling(self, obj, msg):
if msg is not None:
pair = (obj, msg.get_dst())
if pair not in self.port_map:
                self.port_map[pair] = [(self.active_obj_map[self.dmc.get_obj_id()], "uncaught")]
for port_pair in self.port_map[pair]:
destination = port_pair
if destination is None:
print("Destination Not Found")
print(self.port_map)
raise AssertionError
if destination[0] is None:
self.output_event_queue.append((self.global_time, msg.retrieve()))
else:
# Receiver Message Handling
destination[0].ext_trans(destination[1], msg)
# Receiver Scheduling
# wrong : destination[0].set_req_time(self.global_time + destination[0].time_advance())
self.min_schedule_item.remove(destination[0])
destination[0].set_req_time(self.global_time)
self.min_schedule_item.append(destination[0])
#self.min_schedule_item = deque(sorted(self.min_schedule_item, key=lambda bm: bm.get_req_time()))
# self.min_schedule_item.pop()
# self.min_schedule_item.append((destination[0].time_advance() + self.global_time, destination[0]))
def flattening(self, _model, _del_lst):
# handle external output coupling
del_lst = []
for k, v in _model.retrieve_external_output_coupling().items():
for coupling in self.port_map[v]:
#print (self.port_map[v])
#print (k,coupling)
self._coupling_relation(k, coupling)
del_lst.append(v)
for item in del_lst:
if item in self.port_map:
del self.port_map[item]
# handle external input coupling
for k, v in _model.retrieve_external_input_coupling().items():
port_key_lst = []
for sk, sv in self.port_map.items():
if k in sv:
port_key_lst.append(sk)
for key in port_key_lst:
self.port_map[key].remove(k)
self.port_map[key].extend(v)
# handle internal coupling
for k, v, in _model.retrieve_internal_coupling().items():
for dst in v:
self._coupling_relation(k, dst)
# manage model hierarchical
for m in _model.retrieve_models():
if m.get_type() == ModelType.STRUCTURAL:
self.flattening(m, _del_lst)
else:
#print((m,))
self.register_entity(m)
for k, model_lst in self.waiting_obj_map.items():
if _model in model_lst:
_del_lst.append((k, _model))
for target in del_lst:
self.waiting_obj_map[target].remove(_model)
def init_sim(self):
self.simulation_mode = SimulationMode.SIMULATION_RUNNING
        # Flattening
_del_lst = []
for model_lst in self.waiting_obj_map.values():
for model in model_lst:
if model.get_type() == ModelType.STRUCTURAL:
self.flattening(model, _del_lst)
for target, _model in _del_lst:
self.waiting_obj_map[target].remove(_model)
        # set up initial time
        if not self.active_obj_map:
            self.global_time = min(self.waiting_obj_map)
        # search min_schedule_item after first init_sim call
if not self.min_schedule_item:
for obj in self.active_obj_map.items():
if obj[1].time_advance() < 0: # exception handling for parent instance
print("You should give positive real number for the deadline")
raise AssertionError
obj[1].set_req_time(self.global_time)
self.min_schedule_item.append(obj[1])
def schedule(self):
# Agent Creation
self.create_entity()
self.handle_external_input_event()
tuple_obj = self.min_schedule_item.popleft()
before = time.perf_counter() # TODO: consider decorator
while tuple_obj.get_req_time() <= self.global_time:
msg = tuple_obj.output()
if msg is not None:
self.output_handling(tuple_obj, msg)
# Sender Scheduling
tuple_obj.int_trans()
tuple_obj.set_req_time(self.global_time)
self.min_schedule_item.append(tuple_obj)
self.min_schedule_item = deque(sorted(self.min_schedule_item, key=lambda bm: bm.get_req_time()))
tuple_obj = self.min_schedule_item.popleft()
self.min_schedule_item.appendleft(tuple_obj)
# update Global Time
self.global_time += self.time_step
after = time.perf_counter()
if self.sim_mode == "REAL_TIME":
time.sleep((lambda x: x if x > 0 else 0)(float(self.time_step) - float(after-before)))
# Agent Deletion
self.destroy_entity()
def simulate(self, _time=Infinite):
# Termination Condition
self.target_time = self.global_time + _time
# Get minimum scheduled event
self.init_sim()
while self.global_time < self.target_time:
if not self.waiting_obj_map:
if self.min_schedule_item[0].get_req_time() == Infinite and self.sim_mode == 'VIRTUAL_TIME' :
self.simulation_mode = SimulationMode.SIMULATION_TERMINATED
break
self.schedule()
def simulation_stop(self):
self.global_time = 0
self.target_time = 0
self.time_step = 1 # time_step may changed? - cbchoi
# dictionary for waiting simulation objects
self.waiting_obj_map = {}
# dictionary for active simulation objects
self.active_obj_map = {}
# dictionary for object to ports
self.port_map = {}
# self.port_map_wName = []
self.min_schedule_item = deque()
self.sim_init_time = datetime.datetime.now()
# self.eval_time = 0
self.dmc = DefaultMessageCatcher(0, Infinite, "dc", "default")
        self.register_entity(self.dmc)
# External Event Handling - by cbchoi
def insert_external_event(self, _port, _msg, scheduled_time=0):
sm = SysMessage("SRC", _port)
sm.insert(_msg)
if _port in self._input_ports:
heapq.heappush(self.input_event_queue, (scheduled_time + self.global_time, sm))
if self.simulation_mode != SimulationMode.SIMULATION_IDLE:
self.handle_external_input_event()
else:
# TODO Exception Handling
print("[ERROR][INSERT_EXTERNAL_EVNT] Port Not Found")
pass
def get_generated_event(self):
return self.output_event_queue
def handle_external_input_event(self):
event_list = [ev for ev in self.input_event_queue if ev[0] <= self.global_time]
for event in event_list:
self.output_handling(None, event[1])
heapq.heappop(self.input_event_queue)
self.min_schedule_item = deque(sorted(self.min_schedule_item, key=lambda bm: bm.get_req_time()))
pass
def handle_external_output_event(self):
event_lists = copy.deepcopy(self.output_event_queue)
self.output_event_queue.clear()
return event_lists
def is_terminated(self):
return self.simulation_mode == SimulationMode.SIMULATION_TERMINATED
def set_learning_module(self, learn_module):
self.learn_module = learn_module
pass
def get_learning_module(self):
return self.learn_module
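# Example usage (a rough sketch; MyGenerator stands in for a user-defined
# BehaviorModel subclass whose constructor mirrors the DefaultMessageCatcher
# call above -- neither the model nor its arguments are defined in this module):
#
#     se = SysExecutor(1, _sim_name='example', _sim_mode='VIRTUAL_TIME')
#     se.register_entity(MyGenerator(0, Infinite, 'gen', 'default'))
#     se.simulate(100)                 # advance virtual time by 100 steps
#     print(se.get_generated_event())  # messages that were routed outside the model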
|
from keras.preprocessing import image
import numpy as np
from keras.models import model_from_json
arquivo = open('classificador_gato_cachorro.json','r')
estrutura_rede = arquivo.read()
arquivo.close()
classificador = model_from_json(estrutura_rede)
classificador.load_weights('classificador_gato_cachorro.h5')
#CAT --> dataset/test_set/gato/cat.3500.jpg
#DOG --> dataset/test_set/cachorro/dog.3500.jpg
imagem_teste = image.load_img('dataset/test_set/gato/cat.3963.jpg',target_size = (64,64))
imagem_teste = image.img_to_array(imagem_teste)
imagem_teste /= 255
imagem_teste = np.expand_dims(imagem_teste,axis = 0)
previsao = classificador.predict(imagem_teste)
print(previsao)
print('is it a cat? :' + str(previsao > 0.5) )
|
import os
import sys
import argparse
import csv
import datetime
from pathlib import Path
import shutil
import logging
import requests
import xml.etree.ElementTree as ET
import urllib3
from healthcloud_user_csvjinja import HealthcloudUserCSVJinja
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Create logger
logging.basicConfig()
logger = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(description='This script will create users in the portal and reset their passwords.')
parser.add_argument('-v','--verbose', help='Enable verbose logging', action='count', default=0)
parser.add_argument('-i', '--input', help='input file containing mapping to be used in template', required=True, action='store')
parser.add_argument('--hostname', help='Environment to perform changes to (e.g. localhost)', required=True, default="localhost", action='store', nargs='+')
parser.add_argument('--env', help='environment to build (e.g. SIT, UAT, PROD)', required=True, action='store', choices=["SIT","UAT","PROD"])
return parser.parse_args()
def validate_args(main_args):
if not os.path.isfile(main_args.input):
raise SystemExit("input file does not exist: {0}".format(main_args.input))
def send_request(output,host,error_file_pfx):
ns = {'web': 'http://webservices.ht.carecom.dk/',
'soapenv': 'http://schemas.xmlsoap.org/soap/envelope/'}
url = "https://{}:19043/ws/UserManagementServiceSei?wsdl".format(host)
try:
r = requests.post(url, data=output, verify=False)
# print(r.content)
responseRoot = ET.fromstring(r.content.decode('utf-8',errors='ignore'))
# Check for a Soap Fault
faultstring = responseRoot.find('.//faultstring',namespaces=ns)
# print("faultstring {}".format(faultstring.text))
if faultstring is not None and logger.isEnabledFor(logging.INFO):
logger.debug("error occurred: {}".format(faultstring.text))
with open("./errors/" + error_file_pfx +"-error.log","w+") as errorFile:
errorFile.write(output + "\n\n")
errorFile.write(r.content.decode('utf-8',errors='ignore'))
return r.status_code
except requests.exceptions.RequestException as e: # This is the correct syntax
logger.exception("Exception occurred")
sys.exit()
def filter_env(row,env):
return row[env] == "Y"
def main():
args = parse_args()
validate_args(args)
log_level = {
2: logging.DEBUG
,1: logging.INFO
,0: logging.CRITICAL
}
logger.setLevel(log_level[args.verbose])
if logger.isEnabledFor(logging.INFO):
logger.info("capturing errors")
p = Path("errors")
if p.is_dir():
shutil.rmtree("errors")
p.mkdir()
templates = ["create-user-v2.j2","reset-userpassword-v2.j2"]
converter = HealthcloudUserCSVJinja(args.input,templates)
filters = [lambda row: filter_env(row,args.env)]
converter.render_csv(filters)
# Send requests into environment
for i,user in enumerate(converter.users,start=1):
print("{}. user: {}".format(i,user.name))
for t,v in user.templates.items():
print(" - template: {}".format(t))
for host in args.hostname:
status = send_request(v,host,error_file_pfx=user.name)
print(" host:{}, status:{}".format(host,"error" if status is None else status))
print("======================================================================================================")
"""
Execution Script
"""
if __name__ == '__main__':
main()
|
import pytest
import servo
import servo.connectors.scripts
import tests.helpers
@pytest.fixture
def configuration() -> servo.connectors.scripts.ScriptsConfiguration:
return servo.connectors.scripts.ScriptsConfiguration(
before={
'measure': ['echo 123']
},
after={
'measure': ['echo 456']
},
)
@pytest.fixture
def connector(configuration: servo.connectors.scripts.ScriptsConfiguration) -> servo.connectors.scripts.ScriptsConnector:
return servo.connectors.scripts.ScriptsConnector(config=configuration)
async def test_scripts(connector: servo.connectors.scripts.ScriptsConnector) -> None:
connectors = [connector, tests.helpers.MeasureConnector(config=servo.BaseConfiguration())]
_servo = servo.Servo(
config={"optimizer": servo.Optimizer(id="dev.opsani.com/servox", token="1234556789")},
connectors=connectors,
__connectors__=connectors
)
await _servo.dispatch_event('startup')
result = await _servo.dispatch_event('measure')
debug("result is", result)
    assert result is not None
|
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.forms import modelformset_factory
from django.http import JsonResponse
from django.shortcuts import render, redirect, get_object_or_404
from django.template.loader import render_to_string
from django.views.decorators.http import require_POST
from goal_journal.journal.forms import EntryForm
from .forms import GoalForm, NewCategoryForm, ActionForm, ActionFormSet, ActionLogForm
from .models import Category, Goal, Action, ActionLog, GoalScore
@login_required
def goals_list_view(request, category_pk=None):
goals = Goal.objects.filter(user=request.user)
if not goals:
return new_goal_view(request)
context = {}
if category_pk:
category = get_object_or_404(Category, pk=category_pk)
goals = goals.filter(categories=category)
context['category'] = category
achieved_goals = goals.filter(goal_achieved=True)
current_goals = goals.filter(goal_achieved=False)
untracked_goals = current_goals.filter(goalscore__isnull=True)
tracked_goals = current_goals.exclude(goalscore__isnull=True)
# Order by current_score @property
tracked_goals = sorted(tracked_goals, key=lambda g: g.current_score, reverse=True)
categories = Category.objects.filter(user=request.user)
context.update({
'categories': categories,
'untracked_goals': untracked_goals,
'tracked_goals': tracked_goals,
'achieved_goals': achieved_goals,
})
return render(request, template_name='goals/goal_list.html', context=context)
@login_required
def goal_detail_view(request, goal_pk):
goal = get_object_or_404(Goal, pk=goal_pk, user=request.user)
goal_scores = GoalScore.objects.filter(goal=goal)
active_actions = Action.objects.filter(goals=goal, action_completed=False)
# set priority on action instances to length of queryset for actions with NULL priority values for sorting.
# but don't save to db
for action in active_actions:
if not action.priority:
action.priority = len(active_actions)
active_actions = sorted(active_actions, key=lambda a: a.priority)
ActionLogFormSet = modelformset_factory(ActionLog, form=ActionLogForm, extra=goal.actions.count())
action_log_formset = ActionLogFormSet(queryset=ActionLog.objects.none(), initial=[
{'action': action.pk} for action in goal.actions.all()])
action_log = ActionLog.objects.filter(action__goals=goal)[:5]
goals = Goal.objects.filter(user=request.user)
goals = goals.exclude(goalscore__isnull=True)
goals = goals.exclude(pk=goal.pk)
context = {
'goal': goal,
'goal_scores': goal_scores,
'entry_form': EntryForm(),
'active_actions': active_actions,
'action_formset': ActionFormSet(queryset=Action.objects.none()),
'action_log_formset': action_log_formset,
'action_log': action_log,
'goals': goals,
}
# only display chart for goals that have been tracked for over a day
if goal.goalscore_set.count():
display_chart = (goal.most_recent_action - goal.first_action).days >= 1
tracked_for_over_a_week = (goal.most_recent_action - goal.first_action).days > 7
context['display_chart'] = display_chart
context['tracked_for_over_a_week'] = tracked_for_over_a_week
return render(request, template_name='goals/goal_detail.html', context=context)
@login_required
def new_goal_view(request):
if request.method == 'POST':
goal_form = GoalForm(request.POST, user=request.user)
action_formset = ActionFormSet(request.POST, queryset=Action.objects.none(), prefix='actions')
if all([goal_form.is_valid(), action_formset.is_valid()]):
new_goal = goal_form.save(commit=False)
new_goal.user = request.user
new_goal.save()
goal_form.save_m2m()
actions = action_formset.save(commit=False)
for action in actions:
# don't create duplicate action objects
action, _ = Action.objects.get_or_create(action=action.action, user=request.user)
action.goals.add(new_goal)
action.save()
messages.success(request, 'You set a new goal! Start tracking your progress by recording your actions '
'below.')
return redirect('goals:goal_detail', goal_pk=new_goal.pk)
else:
messages.error(request, 'Please correct the form errors below.')
return redirect('goals:new_goal')
goal_form = GoalForm(user=request.user)
category_form = NewCategoryForm()
action_formset = ActionFormSet(queryset=Action.objects.none(), prefix='actions')
context = {
'goal_form': goal_form,
'category_form': category_form,
'action_formset': action_formset,
}
return render(request, template_name='goals/edit_goal.html', context=context)
@require_POST
@login_required
def new_category_view(request):
"""Ajax view for creating new categories from the new/edit goal page."""
category_form = NewCategoryForm(request.POST)
if not category_form.is_valid():
messages.error(request, category_form.errors)
return redirect('goals:new_goal')
category, _ = Category.objects.get_or_create(category=category_form.cleaned_data['category'].capitalize(),
user=request.user)
goal_form = GoalForm(user=request.user)
data = {
'new_category_id': category.pk,
'new_category': category.category.capitalize(),
'category_field': render_to_string(
'goals/_category_select.html',
{'goal_form': goal_form},
request=request
),
}
return JsonResponse(data)
@login_required
def edit_goal_view(request, pk):
goal = get_object_or_404(Goal, pk=pk, user=request.user)
if request.method == 'POST':
goal_form = GoalForm(request.POST, user=request.user, instance=goal)
action_formset = ActionFormSet(request.POST, queryset=Action.objects.filter(goals=goal), prefix='actions')
if all([goal_form.is_valid(), action_formset.is_valid()]):
goal = goal_form.save()
actions = action_formset.save(commit=False)
for action in actions:
action.user = request.user
action.save()
if action not in goal.actions.all():
goal.actions.add(action)
messages.success(request, 'Your changes have been saved!')
return redirect('goals:goal_detail', goal_pk=goal.pk)
else:
messages.error(request, 'Please correct the form errors below.')
return redirect('goals:edit_goal', pk=goal.pk)
goal_form = GoalForm(user=request.user, instance=goal)
category_form = NewCategoryForm()
action_formset = ActionFormSet(queryset=Action.objects.filter(goals=goal), prefix='actions')
context = {
'edit': True,
'goal': goal,
'goal_form': goal_form,
'category_form': category_form,
'action_formset': action_formset,
}
return render(request, template_name='goals/edit_goal.html', context=context)
@login_required
@require_POST
def delete_goal_view(request, pk):
goal = get_object_or_404(Goal, pk=pk, user=request.user)
goal.delete()
messages.success(request, "You deleted the goal {}.".format(goal.goal))
return redirect('goals:goal_list')
@login_required
@require_POST
def goal_achieved_view(request, pk):
goal = get_object_or_404(Goal, pk=pk, user=request.user)
goal.goal_achieved = True
goal.save()
messages.success(request, "CONGRATS! You achieved your goal: '{}'.".format(goal.goal))
return redirect('goals:goal_list')
@login_required
@require_POST
def new_action_view(request, goal_pk):
goal = get_object_or_404(Goal, pk=goal_pk, user=request.user)
action_formset = ActionFormSet(request.POST, queryset=Action.objects.none())
if not action_formset.is_valid():
messages.error(request, action_formset.errors)
return redirect('goals:goal_detail', goal_pk=goal_pk)
actions = action_formset.save(commit=False)
for action in actions:
# don't create duplicate action objects
action, _ = Action.objects.get_or_create(action=action.action, user=request.user)
action.goals.add(goal)
action.save()
messages.success(request, 'Your actions for this goal have been updated.')
return redirect('goals:goal_detail', goal_pk=goal.pk)
@login_required
def manage_action_view(request, action_pk, goal_pk):
goal = get_object_or_404(Goal, pk=goal_pk, user=request.user)
action = get_object_or_404(Action, pk=action_pk, user=request.user)
if request.method == 'POST':
action_form = ActionForm(request.POST, instance=action, user=request.user)
if action_form.is_valid():
action = action_form.save(commit=False)
for new_goal in action_form.cleaned_data['goals']:
action.goals.add(new_goal)
action.save()
if action.action_completed:
messages.success(request, 'Nice work! You completed one of your actions for this goal.')
else:
messages.success(request, 'You updated your action for this goal')
return redirect('goals:goal_detail', goal_pk=goal.pk)
else:
messages.error(request, action_form.errors)
return redirect('goals:manage_action', goal_pk=goal_pk, action_pk=action_pk)
action_form = ActionForm(instance=action, user=request.user)
action_log_form = ActionLogForm()
context = {
'goal': goal,
'action': action,
'action_form': action_form,
'action_log_form': action_log_form
}
return render(request, template_name='goals/action.html', context=context)
@login_required
@require_POST
def delete_action_view(request, goal_pk, action_pk):
goal = get_object_or_404(Goal, pk=goal_pk, user=request.user)
action = get_object_or_404(Action, pk=action_pk, user=request.user)
action.delete()
messages.success(request, "You deleted the action {} from {}.".format(action.action, goal.goal))
return redirect('goals:goal_detail', goal_pk=goal_pk)
@login_required
def action_log_list_view(request, goal_pk):
goal = get_object_or_404(Goal, pk=goal_pk, user=request.user)
action_log_all = ActionLog.objects.filter(action__goals=goal)
paginator = Paginator(action_log_all, 10)
page = request.GET.get('page')
try:
action_log = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
action_log = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
action_log = paginator.page(paginator.num_pages)
context = {
'goal': goal,
'action_log': action_log,
}
return render(request, template_name='goals/action_log.html', context=context)
@login_required
@require_POST
def action_log_view(request, goal_pk):
"""AJAX view for logging actions and updating action log"""
goal = get_object_or_404(Goal, pk=goal_pk, user=request.user)
ActionLogFormSet = modelformset_factory(ActionLog, form=ActionLogForm, extra=goal.actions.count())
action_log_formset = ActionLogFormSet(request.POST)
if not action_log_formset.is_valid():
messages.error(request, action_log_formset.errors)
return redirect('journal:goal_journal', goal_pk=goal_pk)
instances = action_log_formset.save(commit=False)
for action_log in instances:
if action_log.action_status is not None:
action_log.save()
break # we only update one action on the formset at a time
score = goal.calculate_goal_score()
goal.refresh_from_db()
data = {
'action_id': action_log.action.pk,
'goal_score': str(goal.current_score),
'score_calculated_at': score.calculated_at,
'success_range_class': goal.get_success_range_class(),
'action_status': action_log.get_action_status_display(),
'action_status_class': action_log.get_action_status_class(),
'action_logged': render_to_string(
'goals/_action_logged.html',
{'action_recorded': action_log},
request=request
),
'action_log_entry': render_to_string(
'goals/_action_log_item.html',
{'action_recorded': action_log, 'goal': goal},
request=request
),
}
return JsonResponse(data)
@login_required
@require_POST
def delete_action_log_view(request, goal_pk, action_log_pk):
action_log = get_object_or_404(ActionLog, pk=action_log_pk, action__user=request.user)
action_log.delete()
messages.success(request, "You deleted the entry '{}' for '{}' on {} from your action log.".format(
action_log.get_action_status_display(), action_log.action.action,
action_log.status_logged.strftime('%b. %-d, %Y, %-I:%M'))
)
return redirect('goals:goal_detail', goal_pk=goal_pk)
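# A sketch of the URL configuration these views expect, inferred from the
# reverse() names used above (the project's actual goals/urls.py is not part of
# this file, so the path patterns themselves are illustrative assumptions):
#
#     from django.urls import path
#     app_name = 'goals'
#     urlpatterns = [
#         path('', goals_list_view, name='goal_list'),
#         path('new/', new_goal_view, name='new_goal'),
#         path('<int:goal_pk>/', goal_detail_view, name='goal_detail'),
#         path('<int:pk>/edit/', edit_goal_view, name='edit_goal'),
#         path('<int:goal_pk>/action/<int:action_pk>/', manage_action_view, name='manage_action'),
#     ]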
|
from django.shortcuts import render
from django.utils import timezone
from mainbillboard.models import Messages
import json
from django.http import HttpResponse
from .forms import PostForm
# Create your views here.
def get_data(request):
my_result = Messages.objects.order_by('-pub_date')
for post in my_result:
print(str(post))
# posts is the key which contains a list of post objects
return render(request, 'index.html', {"posts": my_result})
def add_post(request):
my_result = Messages.objects.order_by('-pub_date')
    # Submitting the form sends a POST request back to this view,
    # so check the request method and validate the form before saving.
if request.method == "POST":
form = PostForm(request.POST)
# If the form is valid
if form.is_valid():
post = form.save(commit=False)
post.pub_date = timezone.now()
            post.save()
            # re-query so the newly created post is included in the rendered list
            my_result = Messages.objects.order_by('-pub_date')
            return render(request, 'index.html', {"posts": my_result})
    # GET request: render the page with an empty form
else:
form = PostForm()
return render(request, 'index-form.html', {'posts': my_result, 'form': form})
|
import tensorflow as tf
from tensorflow.keras import layers, initializers
class View_Angle_Classfier(tf.keras.Model):
def __init__(self, view_dim):
super(View_Angle_Classfier, self).__init__()
self.f1 = layers.Dense(units = 128, name = "VAC_F1")
self.leakyReLU1 = layers.LeakyReLU(name = "LeakyReLU1")
self.f2 = layers.Dense(units = view_dim)
self.softmax = tf.keras.layers.Softmax()
def call(self, input):
x = self.f1(input)
x = self.leakyReLU1(x)
x = self.f2(x)
x = self.softmax(x)
return x
    def model(self, inputsize: int) -> tf.keras.Model:
input = tf.keras.Input(shape=inputsize, name='input_layer')
return tf.keras.models.Model(inputs=input, outputs = self.call(input))
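# Example usage (a minimal sketch; view_dim=11 and the 128-dimensional input are
# arbitrary illustration values, not taken from this module):
#
#     clf = View_Angle_Classfier(view_dim=11)
#     probs = clf(tf.random.normal((4, 128)))   # shape (4, 11); softmax rows sum to 1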
|
from dataclasses import dataclass
import io
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import xarray as xr
BLUE = "#4F6DB8"
RED = "#A01914"
ANTHRACITE = "#424242"
SYSTEM_SCALE_COLOR = "k"
AUTARKY_EXTENT_COLOR = "k"
PALETTE = sns.light_palette(BLUE)
HIGHLIGHT_COLOR = ANTHRACITE
HIGHLIGHT_LINEWIDTH = 4
HIGHLIGHT_LINESTYLE = "-"
BASE_SCENARIO = "continental-autarky-100-continental-grid"
DATA_INDEX = """autarky_layer,grid_scale,autarky_degree,cost
Regional,Regional,0%,
Regional,National,0%,
Regional,Continental,0%,
Regional,National,≤15%,
Regional,Continental,≤15%,
Regional,National,≤30%,
Regional,Continental,≤30%,
National,National,0%,
National,Continental,0%,
National,Continental,≤15%,
National,Continental,≤30%,
Continental,Continental,0%,
"""
ONSHORE_WIND_TECHS = ["wind_onshore_competing", "wind_onshore_monopoly"]
PV_TECHS = ["open_field_pv", "roof_mounted_pv"]
VRES_TECHS_WITHOUT_HYDRO = ONSHORE_WIND_TECHS + PV_TECHS + ["wind_offshore"]
VRES_TECHS = VRES_TECHS_WITHOUT_HYDRO + ["hydro_run_of_river"]
SUPPLY_TECHS = VRES_TECHS + ["hydro_reservoir"]
STORAGE_TECHS = ["battery", "hydrogen", "pumped_hydro"]
BALANCING_TECHS = STORAGE_TECHS + ["biofuel"]
AUTARKY_LEVEL_MAP = {
"100": "0%",
"85": "≤15%",
"70": "≤30%"
}
@dataclass
class PlotData:
data: pd.DataFrame
cbar_label: str
fmt: str = '.3g'
annotation_scale: float = None
def composition(path_to_aggregated_results_csv, path_to_aggregated_results_nc, path_to_output,
transmission_capacity_today_twkm, crossborder_capacity_today_tw):
fig = plt.figure(figsize=(6.77, 5.5))
axes = fig.subplots(2, 2).flatten()
plot_datas = read_plot_datas(
path_to_aggregated_results_nc,
path_to_aggregated_results_csv,
transmission_capacity_today_twkm,
crossborder_capacity_today_tw
)
for ax, cbar_ax, plot_data in zip(axes, range(4), plot_datas):
base_case_box(plot_data, ax, cbar_ax)
plt.subplots_adjust(
bottom=0.08,
wspace=0.3,
top=0.95,
hspace=0.2
)
fig.savefig(path_to_output, pil_kwargs={"compression": "tiff_lzw"})
def read_plot_datas(path_to_aggregated_results_nc, path_to_aggregated_results_csv,
transmission_capacity_today_twkm, crossborder_capacity_today_tw):
return [
PlotData(
data=read_total_supply_capacity(path_to_aggregated_results_nc),
cbar_label="A - Supply capacity [TW]",
fmt=".1f"
),
PlotData(
data=read_biostor_capacity(path_to_aggregated_results_nc),
cbar_label="B - Balancing capacity [TW]",
fmt=".2f"
),
PlotData(
data=read_transmission_capacity(path_to_aggregated_results_csv),
cbar_label="C - Transmission capacity [TWkm]",
fmt=".0f",
annotation_scale=transmission_capacity_today_twkm,
),
PlotData(
data=read_international_transmission_capacity(path_to_aggregated_results_csv),
cbar_label="D - Cross-border transmission capacity [TW]",
fmt=".2g",
annotation_scale=crossborder_capacity_today_tw
),
]
def base_case_box(plot_data, ax, cbar_ax):
results = plot_data.data
cbar_ticks = np.linspace(
start=results["cost"].min(),
stop=results["cost"].max(),
num=len(PALETTE) + 1
)
heatmap_data = (
results[results.autarky_degree == "0%"]
.pivot(index="autarky_layer", columns="grid_scale", values="cost")
.reindex(index=["Continental", "National", "Regional"])
)
if plot_data.annotation_scale:
annot = heatmap_data.applymap(
lambda x: f"{{:{plot_data.fmt}}}\n({x / plot_data.annotation_scale:.1f})".format(x)
)
fmt = "s"
else:
annot = True
fmt = plot_data.fmt
sns.heatmap(
heatmap_data,
annot=annot,
cbar=True,
cbar_kws={
"ticks": cbar_ticks,
"format": f"%{plot_data.fmt}",
"aspect": 30,
"shrink": 0.8
},
cmap=PALETTE,
vmin=results["cost"].min(),
vmax=results["cost"].max(),
linewidth=1.25,
square=True,
ax=ax,
fmt=fmt
)
ax.set_xlabel("Balancing scale")
ax.set_ylabel("Supply scale")
ax.set_yticklabels(ax.get_yticklabels(), rotation=90, va="center")
ax.set_title(plot_data.cbar_label, loc="left")
def read_transmission_capacity(path_to_agregrated_results):
da = (
pd
.read_csv(path_to_agregrated_results, index_col=[0, 1])
.to_xarray()
.rename({"Scenario": "scenario"})
.sel(Variable="Capacity|Transmission")
.Value
)
return bring_into_form(da)
def read_international_transmission_capacity(path_to_agregrated_results):
da = (
pd
.read_csv(path_to_agregrated_results, index_col=[0, 1])
.to_xarray()
.rename({"Scenario": "scenario"})
.sel(Variable="Capacity|Gross import national level")
.Value
) / 1e3 # to TW
return bring_into_form(da)
def read_total_supply_capacity(path_to_agregrated_results):
da = (
xr
.open_dataset(path_to_agregrated_results)
.energy_cap
.sel(techs=SUPPLY_TECHS)
.sum(["locs", "techs"])
) / 1e6 # to TW
return bring_into_form(da)
def read_wind_capacity(path_to_agregrated_results):
da = (
xr
.open_dataset(path_to_agregrated_results)
.energy_cap
.sel(techs=ONSHORE_WIND_TECHS + ["wind_offshore"])
.sum(["locs", "techs"])
) / 1e6 # to TW
return bring_into_form(da)
def read_biostor_capacity(path_to_agregrated_results):
da = (
xr
.open_dataset(path_to_agregrated_results)
.energy_cap
.sel(techs=BALANCING_TECHS)
.sum(["locs", "techs"])
) / 1e6 # to TW
return bring_into_form(da)
def bring_into_form(da):
results = (
pd
.read_csv(io.StringIO(DATA_INDEX))
.set_index(["autarky_layer", "grid_scale", "autarky_degree"])
)
for scenario in da.scenario:
scenario = scenario.item()
autarky_layer, autarky_level, grid_size = parse_scenario_name(scenario)
autarky_level = AUTARKY_LEVEL_MAP[autarky_level]
results.loc[autarky_layer, grid_size, autarky_level] = da.sel(scenario=scenario).item()
return results.reset_index()
def parse_scenario_name(scenario_name):
autarky_layer, _, autarky_level, grid_size, _ = scenario_name.split("-")
assert autarky_layer in ["regional", "national", "continental"]
assert grid_size in ["regional", "national", "continental"]
return autarky_layer.capitalize(), autarky_level, grid_size.capitalize()
if __name__ == "__main__":
composition(
path_to_aggregated_results_csv=snakemake.input.results_csv,
path_to_aggregated_results_nc=snakemake.input.results_nc,
transmission_capacity_today_twkm=snakemake.params.transmission_today_twkm,
crossborder_capacity_today_tw=snakemake.params.crossborder_today_tw,
path_to_output=snakemake.output[0]
)
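# Hedged usage sketch: the script is normally driven by snakemake (see the __main__ block
# above), but composition() can also be called directly. All paths and scaling values below
# are illustrative assumptions, not files shipped with this code.
#
# composition(
#     path_to_aggregated_results_csv="aggregated-results.csv",
#     path_to_aggregated_results_nc="aggregated-results.nc",
#     transmission_capacity_today_twkm=300,   # assumed present-day transmission capacity in TWkm
#     crossborder_capacity_today_tw=0.05,     # assumed present-day cross-border capacity in TW
#     path_to_output="composition.tiff"
# )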
|
"""
.. class:: MultiDark
.. moduleauthor:: Johan Comparat <johan.comparat__at__gmail.com>
The class MultiDark is a wrapper to handle Multidark simulations results / outputs.
"""
import cPickle
import fileinput
import astropy.cosmology as co
import astropy.units as u
c2 = co.Planck13
from scipy.interpolate import interp1d
from os.path import join
import os
import astropy.units as uu
import numpy as n
import glob
class MultiDarkSimulation :
"""
Loads the environment specific to the Multidark simulations. This is the fixed framework of the simulation.
:param Lbox: length of the box in Mpc/h
:param wdir: Path to the multidark lightcone directory
:param boxDir: box directory name
:param snl: list of snapshots available
:param zsl: list of redshift corresponding to the snapshots
:param zArray: redshift array to be considered to interpolate the redshift -- distance conversion
:param Hbox: Hubble constant at redshift 0 of the box
:param Melement: Mass of the resolution element in solar masses.
:param columnDict: dictionary mapping a column name to its column index in the snapshot files
"""
def __init__(self,Lbox=2500.0 * uu.Mpc, wdir="/data2/DATA/eBOSS/Multidark-lightcones/", boxDir="MD_2.5Gpc", snl=n.array(glob.glob("/data2/DATA/eBOSS/Multidark-lightcones/MD_2.5Gpc/snapshots/hlist_?.?????.list")), zsl=None, zArray=n.arange(0.2,2.4,1e-1), Hbox = 67.77 * uu.km / (uu.s * uu.Mpc), Melement = 23593750000.0 ):
self.Lbox = Lbox # box length
self.Hbox = Hbox # Hubble constant at redshift 0 in the box
self.wdir = wdir # working directory
self.boxDir = boxDir # directory of the box where the snapshots are stored
self.snl = snl # snapshot list
self.zsl = zsl # corresponding redshift list
self.zArray = zArray # redshift for the dC - z conversion
self.Melement = Melement # mass of one particle in the box
self.h = 0.6777
self.omega_lambde = 0.692885
self.omega_matter = 0.307115
self.omega_baryon = 0.048206
self.ns = 0.96
self.sigma8 = 0.8228
self.G = 6.67428 * 10**(-9) # cm3 g-1 s-2
self.Msun = 1.98892 * 10**(33.) # g
self.Npart = 3840
self.force_resolution = 5. # kpc /h
self.columnDict = {'id': 0, 'desc_id': 1, 'mvir': 2, 'vmax': 3, 'vrms': 4, 'rvir': 5, 'rs': 6, 'Np' : 7, 'x': 8, 'y': 9, 'z': 10, 'vx': 11, 'vy': 12, 'vz': 13, 'Jx': 14, 'Jy': 15, 'Jz': 16, 'Spin': 17, 'Rs_Klypin': 18, 'Mmvir_all': 19, 'M200b': 20, 'M200c': 21, 'M500c': 22, 'M2500c': 23,'Xoff': 24, 'Voff': 25, 'Spin_Bullock': 26, 'b_to_a': 27, 'c_to_a': 28, 'Ax': 29, 'Ay': 30, 'Az': 31, 'b_to_a_500c': 32, 'c_to_a_500c': 33, 'Ax_500c': 34, 'Ay_500c': 35, 'Az_500c': 36, 'TU': 37, 'M_pe_Behroozi': 38, 'M_pe_Diemer': 39, 'pid': 40}
if self.boxDir == "MD_0.4Gpc":
self.Melement = 9.63 * 10**7 # Msun
self.vmin = 4* (self.Melement*self.Msun*self.G/(self.force_resolution*u.kpc.to('cm')))**0.5 * u.cm.to('km')
if self.boxDir == "MD_1Gpc_new_rockS":
self.Melement = 1.51 * 10**9. # Msun
self.vmin = 4* (self.Melement*self.Msun*self.G/(self.force_resolution*u.kpc.to('cm')))**0.5 * u.cm.to('km')
if self.boxDir == "MD_2.5Gpc":
self.Melement = 2.359 * 10**10. # Msun
self.vmin = 4* (self.Melement*self.Msun*self.G/(self.force_resolution*u.kpc.to('cm')))**0.5 * u.cm.to('km')
if self.boxDir == "MD_4Gpc":
self.Melement = 9.6 * 10**10. # Msun
self.Npart = 4096
self.vmin = 4* (self.Melement*self.Msun*self.G/(self.force_resolution*u.kpc.to('cm')))**0.5 * u.cm.to('km')
if self.boxDir == "MD_2.5Gpc":
# for satellites ...
self.columnDictHlist = {'scale': 0, 'id': 1, 'desc_scale': 2, 'desc_id': 3, 'num_prog': 4, 'pid': 5, 'upid': 6, 'desc_pid': 7, 'phantom': 8, 'sam_mvir': 9, 'mvir': 10, 'rvir': 11, 'rs': 12, 'vrms': 13, 'mmp?': 14, 'scale_of_last_MM': 15, 'vmax': 16, 'x': 17, 'y': 18, 'z': 19, 'vx': 20, 'vy': 21, 'vz': 22, 'Jx': 23, 'Jy': 24, 'Jz': 25, 'Spin': 26, 'Breadth_first_ID': 27, 'Depth_first_ID': 28, 'Tree_root_ID': 29, 'Orig_halo_ID': 30, 'Snap_num': 31, 'Next_coprogenitor_depthfirst_ID': 32, 'Last_progenitor_depthfirst_ID': 33, 'Last_mainleaf_depthfirst_ID': 34, 'Rs_Klypin': 35, 'Mmvir_all': 36, 'M200b': 37, 'M200c': 38, 'M500c': 39, 'M2500c': 40, 'Xoff': 41, 'Voff': 42, 'Spin_Bullock': 43, 'b_to_a': 44, 'c_to_a': 45, 'Ax': 46, 'Ay': 47, 'Az': 48, 'b_to_a_500c': 49, 'c_to_a_500c': 50, 'Ax_500c': 51, 'Ay_500c': 52, 'Az_500c': 53, 'TU': 54, 'M_pe_Behroozi': 55, 'M_pe_Diemer': 56, 'Halfmass_Radius': 57, 'Macc': 58, 'Mpeak': 59, 'Vacc': 60, 'Vpeak': 61, 'Halfmass_Scale': 62, 'Acc_Rate_Inst': 63, 'Acc_Rate_100Myr': 64, 'Acc_Rate_1Tdyn': 65, 'Acc_Rate_2Tdyn': 66, 'Acc_Rate_Mpeak': 67, 'Mpeak_Scale': 68, 'Acc_Scale': 69, 'First_Acc_Scale': 70, 'First_Acc_Mvir': 71, 'First_Acc_Vmax': 72, 'VmaxatMpeak': 73}
if self.boxDir == "MD_0.4Gpc" or self.boxDir == "MD_1Gpc_new_rockS" :
# for satellites ...
self.columnDictHlist = {'scale': 0, 'id': 1, 'desc_scale': 2, 'desc_id': 3, 'num_prog': 4, 'pid': 5, 'upid': 6, 'desc_pid': 7, 'phantom': 8, 'sam_mvir': 9, 'mvir': 10, 'rvir': 11, 'rs': 12, 'vrms': 13, 'mmp?': 14, 'scale_of_last_MM': 15, 'vmax': 16, 'x': 17, 'y': 18, 'z': 19, 'vx': 20, 'vy': 21, 'vz': 22, 'Jx': 23, 'Jy': 24, 'Jz': 25, 'Spin': 26, 'Breadth_first_ID': 27, 'Depth_first_ID': 28, 'Tree_root_ID': 29, 'Orig_halo_ID': 30, 'Snap_num': 31, 'Next_coprogenitor_depthfirst_ID': 32, 'Last_progenitor_depthfirst_ID': 33, 'Last_mainleaf_depthfirst_ID': 34, 'Rs_Klypin': 35, 'Mmvir_all': 36, 'M200b': 37, 'M200c': 38, 'M500c': 39, 'M2500c': 40, 'Xoff': 41, 'Voff': 42, 'Spin_Bullock': 43, 'b_to_a': 44, 'c_to_a': 45, 'Ax': 46, 'Ay': 47, 'Az': 48, 'b_to_a_500c': 49, 'c_to_a_500c': 50, 'Ax_500c': 51, 'Ay_500c': 52, 'Az_500c': 53, 'TU': 54, 'M_pe_Behroozi': 55, 'M_pe_Diemer': 56, 'Macc': 57, 'Mpeak': 58, 'Vacc': 59, 'Vpeak': 60, 'Halfmass_Scale': 61, 'Acc_Rate_Inst': 62, 'Acc_Rate_100Myr': 63, 'Acc_Rate_1Tdyn': 64, 'Acc_Rate_2Tdyn': 65, 'Acc_Rate_Mpeak': 66, 'Mpeak_Scale': 67, 'Acc_Scale': 68, 'First_Acc_Scale': 69, 'First_Acc_Mvir': 70, 'First_Acc_Vmax': 71, 'VmaxatMpeak': 72}
def get_DF_at_XYZ(x, y, z, path_to_DF, Lbox=1000., gridSize = 2048.):
"""
Sketch (not yet functional): reads the density-field value of the grid cell containing the point (x, y, z).
:param x,y,z: coordinates of the point in Mpc/h
:param path_to_DF: path to the density field file
:param Lbox: length of the box in Mpc/h
:param gridSize: number of cells per side of the density-field grid
"""
dL = Lbox/gridSize
# indices (ii, jj, kk) of the cell containing the point along each axis
ii = int(x/dL - 0.5)
jj = int(y/dL - 0.5)
kk = int(z/dL - 0.5)
# the cell selection then reads
sel = ( x > dL*(0.5 + ii) ) & ( x < dL*(0.5 + ii + 1) ) & ( y > dL*(0.5 + jj) ) & ( y < dL*(0.5 + jj + 1) ) & ( z > dL*(0.5 + kk) ) & ( z < dL*(0.5 + kk + 1) )
# TODO: seek to the record of that cell in the density field file and return its value;
# the file stores float64 values in batches of 512**3 (see computeDensityFieldDistributionFunction below)
f = open(path_to_DF,'r')
data1 = n.fromfile(f, dtype="float64", count=512**3) # 512 cube
def computeSingleDistributionFunction(self, ii, name, bins, Mfactor=100. ) :
"""
Extracts the distribution of quantity 'name' from one snapshot of the Multidark simulation.
:param ii: index of the snapshot in the list self.snl
:param name: name of the quantity of interest (mass, velocity, ...); its column index is looked up in self.columnDict
:param bins: binning scheme to compute the histogram.
:param Mfactor: only halos with Mvir > Mfactor * Melement are used.
"""
index = self.columnDict[name]
output_dir = join(self.wdir,self.boxDir,"properties",name)
os.system('mkdir '+ output_dir)
NperBatch = 10000000
qtyCentral = n.empty(NperBatch) # 10M array
qtySat = n.empty(NperBatch) # 10M array
print name, index, output_dir
fl = fileinput.input(self.snl[ii])
nameSnapshot = self.snl[ii].split('/')[-1][:-5]
countCen,countSat,countFileCen,countFileSat = 0,0,0,0
for line in fl:
if line[0] == "#" :
continue
line = line.split()
sat_or_cen = float(line[self.columnDict['pid']])
mv = float(line[self.columnDict['mvir']])
if sat_or_cen != -1 and mv > Mfactor * self.Melement :
countSat+= 1
qtySat[countSat] = float(line[index])
if sat_or_cen == -1 and mv > Mfactor * self.Melement :
countCen+= 1
qtyCentral[countCen] = float(line[index])
if countCen == NperBatch-1 :
nnM,bb = n.histogram(n.log10(qtyCentral),bins = bins)
print "countCen",countCen
f = open(join(output_dir, nameSnapshot + "_" + name + "_Central_" + str(countFileCen)+ ".pkl"),'w')
cPickle.dump(nnM,f)
f.close()
countFileCen+= 1
countCen = 0
qtyCentral = n.empty(NperBatch)
if countSat == NperBatch-1 :
nnM,bb = n.histogram(n.log10(qtySat),bins = bins)
print "countSat", countSat
f = open(join(output_dir, nameSnapshot + "_" + name+ "_Satellite_" + str(countFileSat)+ ".pkl"),'w')
cPickle.dump(nnM,f)
f.close()
countFileSat+= 1
countSat = 0
qtySat = n.empty(NperBatch)
# and for the last batch :
nnM,bb = n.histogram(n.log10(qtyCentral),bins = bins)
f = open(join(output_dir, nameSnapshot + "_" + name +"_Central_" + str(countFileCen)+ ".pkl"),'w')
cPickle.dump(nnM,f)
f.close()
nnM,bb = n.histogram(n.log10(qtySat),bins = bins)
f = open(join(output_dir, nameSnapshot + "_" + name + "_Satellite_" + str(countFileSat)+ ".pkl"),'w')
cPickle.dump(nnM,f)
f.close()
n.savetxt(join(output_dir,name+".bins"),n.transpose([bins]))
def computeDensityFieldDistributionFunction(self, path_to_DF, outputFile, bins ) :
"""
Computes the histogram of the density field, read in batches from the density field file.
:param path_to_DF: path to the density field file
:param outputFile: prefix of the file where the histogram is written
:param bins: binning scheme to compute the histogram.
"""
NperBatch = 512**3
Ntotal = 2048**3
Nratio = Ntotal / NperBatch
#qtyCentral = n.empty( (64,NperBatch) )
f=open(path_to_DF,'r')
qty = n.empty( (Nratio,len(bins)-1) )
for ii in n.arange(Nratio):
data1 = n.fromfile(f,dtype="float64",count=NperBatch) # 512 cube
nnM,bb = n.histogram(n.log10(data1),bins = bins)
qty[ii] = nnM
n.savetxt(outputFile+".hist",n.transpose([bins[:-1], bins[1:], qty.sum(axis=0) ]), header = " minLogDelta maxLogDelta N")
def computeDensityFieldForHaloNumber(self, path_to_DF, path_to_RS, outputFile, gridSize=2048, subgridSize = 256 ) :
"""
Correlates the density field with halo counts from a rockstar catalog (work in progress, see the TODO markers below).
:param path_to_DF: path to the density field file
:param path_to_RS: path to the rockstar halo catalog file
:param outputFile: prefix of the files where the histograms are written
:param gridSize: grid size of the density field
:param subgridSize: grid size to compute histograms on and write outputs.
"""
#In each cell, average the number of counts in the RS file : Ncen Nsat
dL = 1000./gridSize
NperBatch = subgridSize**3
Ntotal = gridSize**3
Nratio = Ntotal / NperBatch
hf=open(path_to_RS,'r')
# TODO: read x, y, z and the central/satellite flag from the rockstar catalog (hf), and define binsA, binsB
# TODO: restrict to the halos of interest
f=open(path_to_DF,'r')
out = n.empty( (subgridSize**3, 3) )
count = 0
countOut = 0
for kk in n.arange(gridSize):
for jj in n.arange(gridSize):
for ii in n.arange(gridSize):
sel =( x > dL*(0.5 + ii) ) & ( x < dL*(0.5 + ii + 1) ) & ( y > dL*(0.5 + jj) ) & ( y < dL*(0.5 + jj + 1) ) & ( z > dL*(0.5 + kk) ) & ( z < dL*(0.5 + kk + 1) )
Nhalos = len(sel.nonzero()[0])
selCen = (sel) & (CONDITION_CEN) # CONDITION_CEN is a placeholder for the central-halo cut (pid == -1 elsewhere in this class)
NCentrals = len(selCen.nonzero()[0])
deltaValue = n.fromfile(f,dtype="float64",count=1)
out[count] = n.array([deltaValue, Nhalos, NCentrals])
if count == subgridSize**3 :
dataAB_tot = n.histogram2d(n.log10(out.T[0]), n.log10(out.T[1]) ,bins = [binsA,binsB])
dataAB_cen = n.histogram2d(n.log10(out.T[0]), n.log10(out.T[2]) ,bins = [binsA,binsB])
f = open(outputFile + "_" +str(countOut)+".pkl" ,'w')
cPickle.dump([binsA,binsB,dataAB_tot, dataAB_cen],f)
f.close()
out = n.empty( (subgridSize**3, 3) )
countOut +=1
count += 1
#GATHER RESULTS
fileList = glob.glob(outputFile + "_*.pkl")
out_all = n.empty( (Nratio, len(binsA)-1, len(binsB)-1) )
out_cen = n.empty( (Nratio, len(binsA)-1, len(binsB)-1) )
for ii, el in enumerate(fileList):
f = open(el ,'r')
binsA, binsB, dataAB_tot, dataAB_cen = cPickle.load(f)
f.close()
out_all[ii] = dataAB_tot
out_cen[ii] = dataAB_cen
f = open(outputFile + "_all.pkl" ,'w')
cPickle.dump([binsA, binsB, n.sum(out_all, axis=0), n.sum(out_cen, axis=0)], f)
f.close()
def computeDensityFieldHaloCorrelation(self, path_to_DF, path_to_RS, outputFile, bins ) :
"""
Correlates the density field with the halo catalog (incomplete: the halo positions x are not read from path_to_RS yet).
:param path_to_DF: path to the density field file
:param path_to_RS: path to the rockstar halo catalog file
:param outputFile: where the histogram is written
:param bins: binning scheme to compute the histogram.
"""
#In each cell, average the number of counts in the RS file : Ncen Nsat
NperBatch = 512**3
Ntotal = 2048**3
Nratio = Ntotal / NperBatch
dL = 1000/2048.
f=open(path_to_DF,'r')
for N in n.arange(Ntotal):
deltaValue = n.fromfile(f,dtype="float64",count=1) # 512 cube
sel =( x > dL*(0.5 + N) ) & ( x < dL*(0.5 + N + 1) )
def combinesSingleDistributionFunction(self, ii, name='Vpeak', bins=10**n.arange(0,3.5,0.01), type = "Central" ) :
"""
Combines the outputs of computeSingleDistributionFunction.
:param ii: index of the snapshot
:param name: name of the quantity studied
:param bins: bins the histogram was done with
:param type: "Central" or "Satellite"
"""
output_dir = join(self.wdir,self.boxDir,"properties",name)
nameSnapshot = self.snl[ii].split('/')[-1][:-5]
pklList = n.array(glob.glob(join(output_dir, nameSnapshot + "_" + name +"_"+type+"_*.pkl")))
nnM = n.empty( [len(pklList),len(bins)-1] )
for jj in range(len(pklList)):
f=open(pklList[jj],'r')
nnMinter = cPickle.load(f)
nnM[jj] = nnMinter
f.close()
n.savetxt(join(output_dir,"hist-"+type+"-"+name+"-"+nameSnapshot[6:]+".dat"),n.transpose([bins[:-1], bins[1:], nnM.sum(axis=0)]))
def computeDoubleDistributionFunction(self, ii, nameA, nameB, binsA, binsB, Mfactor = 100. ) :
"""
Extracts the distributions of two quantities and their joint (2d) histogram from one snapshot of the Multidark simulation.
:param ii: index of the snapshot in the list self.snl
:param nameA, nameB: names of the two quantities of interest (mass, velocity, ...); their column indices are looked up in self.columnDict
:param binsA, binsB: binning schemes to compute the histograms.
:param Mfactor: only halos with Mvir > Mfactor * Melement are used.
"""
indexA = self.columnDict[nameA]
indexB = self.columnDict[nameB]
output_dir = join(self.wdir,self.boxDir,"properties",nameA+"-"+nameB)
os.system('mkdir '+ output_dir)
NperBatch = 10000000
qtyCentral = n.empty((NperBatch,2)) # 10M array
qtySat = n.empty((NperBatch,2)) # 10M array
print nameA, nameB, indexA, indexB, output_dir
fl = fileinput.input(self.snl[ii])
nameSnapshot = self.snl[ii].split('/')[-1][:-5]
countCen,countSat,countFileCen,countFileSat = 0,0,0,0
for line in fl:
if line[0] == "#" :
continue
line = line.split()
sat_or_cen = float(line[self.columnDict['pid']])
mv = float(line[self.columnDict['mvir']])
if sat_or_cen != -1 and mv > Mfactor * self.Melement :
countSat+= 1
qtySat[countSat] = float(line[indexA]),float(line[indexB])
if sat_or_cen == -1 and mv > Mfactor * self.Melement :
countCen+= 1
qtyCentral[countCen] = float(line[indexA]),float(line[indexB])
if countCen == NperBatch-1 :
nnA,bbA = n.histogram(n.log10(qtyCentral.T[0]),bins = binsA)
nnB,bbB = n.histogram(n.log10(qtyCentral.T[1]),bins = binsB)
dataAB = n.histogram2d(n.log10(qtyCentral.T[0]), n.log10(qtyCentral.T[1]) ,bins = [binsA,binsB])
print "countCen",countCen
f = open(join(output_dir, nameSnapshot + "_" + nameA+"-"+nameB + "_Central_" + str(countFileCen)+ ".pkl"),'w')
cPickle.dump([nnA,nnB,dataAB],f)
f.close()
countFileCen+= 1
countCen = 0
qtyCentral = n.empty((NperBatch,2))
if countSat == NperBatch-1 :
nnA,bbA = n.histogram(n.log10(qtySat.T[0]),bins = binsA)
nnB,bbB = n.histogram(n.log10(qtySat.T[1]),bins = binsB)
dataAB = n.histogram2d(n.log10(qtySat.T[0]), n.log10(qtySat.T[1]) ,bins = [binsA,binsB])
print "countSat", countSat
f = open(join(output_dir, nameSnapshot + "_" + nameA+"-"+nameB+ "_Satellite_" + str(countFileSat)+ ".pkl"),'w')
cPickle.dump([nnA,nnB,dataAB],f)
f.close()
countFileSat+= 1
countSat = 0
qtySat = n.empty((NperBatch,2))
# and for the last batch :
nnA,bbA = n.histogram(n.log10(qtyCentral.T[0]),bins = binsA)
nnB,bbB = n.histogram(n.log10(qtyCentral.T[1]),bins = binsB)
dataAB = n.histogram2d(n.log10(qtyCentral.T[0]), n.log10(qtyCentral.T[1]) ,bins = [binsA,binsB])
print "countCen",countCen
f = open(join(output_dir, nameSnapshot + "_" + nameA+"-"+nameB + "_Central_" + str(countFileCen)+ ".pkl"),'w')
cPickle.dump([nnA,nnB,dataAB],f)
f.close()
nnA,bbA = n.histogram(n.log10(qtySat.T[0]),bins = binsA)
nnB,bbB = n.histogram(n.log10(qtySat.T[1]),bins = binsB)
dataAB = n.histogram2d(n.log10(qtySat.T[0]), n.log10(qtySat.T[1]) ,bins = [binsA,binsB])
print "countSat", countSat
f = open(join(output_dir, nameSnapshot + "_" + nameA+"-"+nameB+ "_Satellite_" + str(countFileSat)+ ".pkl"),'w')
cPickle.dump([nnA,nnB,dataAB],f)
f.close()
n.savetxt(join(output_dir,nameA+".bins"),n.transpose([binsA]))
n.savetxt(join(output_dir,nameB+".bins"),n.transpose([binsB]))
def combinesDoubleDistributionFunction(self, ii, nameA, nameB, binsA, binsB, type = "Central" ) :
"""
Combines the outputs of computeDoubleDistributionFunction.
:param ii: index of the snapshot
:param nameA, nameB: names of the quantities studied
:param bins: bins the histogram was done with
:param type: "Central" or "Satellite"
"""
output_dir = join(self.wdir,self.boxDir,"properties",nameA+"-"+nameB)
nameSnapshot = self.snl[ii].split('/')[-1][:-5]
pklList = n.array(glob.glob(join(output_dir, nameSnapshot + "_" + nameA+"-"+nameB +"_"+type+"_*.pkl")))
nnA = n.empty( [len(pklList),len(binsA)-1] )
nnB = n.empty( [len(pklList),len(binsB)-1] )
dataAB = n.empty( [len(pklList),len(binsA)-1,len(binsB)-1] )
for jj in range(len(pklList)):
f=open(pklList[jj],'r')
nnAinter, nnBinter, dataABinter = cPickle.load(f)
nnA[jj] = nnAinter
nnB[jj] = nnBinter
dataAB[jj] = dataABinter[0]
f.close()
n.savetxt(join(output_dir,"hist-"+type+"-"+nameA+"-"+nameSnapshot[6:]+".dat"),n.transpose([binsA[:-1], binsA[1:], nnA.sum(axis=0)]))
n.savetxt(join(output_dir,"hist-"+type+"-"+nameB+"-"+nameSnapshot[6:]+".dat"),n.transpose([binsB[:-1], binsB[1:], nnB.sum(axis=0)]))
n.savetxt(join(output_dir, "hist2d-"+type+"-"+ nameA+"-"+nameB + "-"+ nameSnapshot[6:] + ".dat"), dataAB.sum(axis=0))
def computeMassVelocityConcentrationFunction(self,ii) :
"""
DO NOT USE
computes the mass, velocity and concentration histograms for a rockstar snapshot.
:param ii: index of the snapshot in the list self.snl
# does not work any more
DO NOT USE
"""
massB = n.arange(8,16,0.01)
vcirB = n.arange(0,4.5,0.01)
concB = n.arange(1,3,0.1)
NperBatch = 10000000
mvcCentralMatrix = n.empty((NperBatch,3)) # 1M matrixes
mvcSatMatrix = n.empty((NperBatch,3)) # 1 M matrixes
fl = fileinput.input(self.snl[ii])
name = self.snl[ii].split('/')[-1][:-5]
countCen,countSat,countFileCen,countFileSat = 0,0,0,0
for line in fl:
if line[0] == "#" :
continue
line = line.split()
sat_or_cen = float(line[5])
if sat_or_cen != -1 :
countSat+= 1
mvcSatMatrix[countSat] = float(line[10]), float(line[16]), float(line[11])
if sat_or_cen == -1 :
countCen+= 1
mvcCentralMatrix[countCen] = float(line[10]), float(line[16]), float(line[11])
if countCen == NperBatch-1 :
nnM,bb = n.histogram(n.log10(mvcCentralMatrix.T[0]),bins = massB)
nnV,bb = n.histogram(n.log10(mvcCentralMatrix.T[1]),bins = vcirB)
nnC,bb = n.histogram(n.log10(mvcCentralMatrix.T[2]),bins = concB)
dataMC = n.histogram2d(n.log10(mvcCentralMatrix.T[0]), mvcCentralMatrix.T[2] ,bins = [massB,concB])
dataVC = n.histogram2d(n.log10(mvcCentralMatrix.T[1]), mvcCentralMatrix.T[2] , bins = [vcirB,concB])
print "countCen",countCen
f = open(join(self.wdir,self.boxDir,"properties", name+"_MVRmatrixCentral_" +str(countFileCen)+ ".pkl"),'w')
cPickle.dump([nnM,nnV,nnC,dataMC,dataVC],f)
f.close()
countFileCen+= 1
countCen = 0
if countSat == NperBatch-1 :
nnM,bb = n.histogram(n.log10(mvcSatMatrix.T[0]),bins = massB)
nnV,bb = n.histogram(n.log10(mvcSatMatrix.T[1]),bins = vcirB)
nnC,bb = n.histogram(n.log10(mvcSatMatrix.T[2]),bins = concB)
dataMC = n.histogram2d(n.log10(mvcSatMatrix.T[0]), mvcSatMatrix.T[2] ,bins = [massB,concB])
dataVC = n.histogram2d(n.log10(mvcSatMatrix.T[1]), mvcSatMatrix.T[2] , bins = [vcirB,concB])
print "countSat", countSat
f = open(join(self.wdir,self.boxDir ,"properties" ,
name+"_MVRmatrixSatellite_" +str(countFileSat)+ ".pkl"),'w')
cPickle.dump([nnM,nnV,nnC,dataMC,dataVC],f)
f.close()
countFileSat+= 1
countSat = 0
# and for the last batch :
nnM,bb = n.histogram(n.log10(mvcCentralMatrix.T[0]),bins = massB)
nnV,bb = n.histogram(n.log10(mvcCentralMatrix.T[1]),bins = vcirB)
nnC,bb = n.histogram(n.log10(mvcCentralMatrix.T[2]),bins = concB)
dataMC = n.histogram2d(n.log10(mvcCentralMatrix.T[0]), mvcCentralMatrix.T[2] ,bins = [massB,concB])
dataVC = n.histogram2d(n.log10(mvcCentralMatrix.T[1]), mvcCentralMatrix.T[2] , bins = [vcirB,concB])
f = open(join(self.wdir,self.boxDir,"properties",name+ "_MVRmatrixCentral_" +str(countFileCen)+ ".pkl"),'w')
cPickle.dump([nnM,nnV,nnC,dataMC,dataVC],f)
f.close()
nnM,bb = n.histogram(n.log10(mvcSatMatrix.T[0]),bins = massB)
nnV,bb = n.histogram(n.log10(mvcSatMatrix.T[1]),bins = vcirB)
nnC,bb = n.histogram(n.log10(mvcSatMatrix.T[2]),bins = concB)
dataMC = n.histogram2d(n.log10(mvcSatMatrix.T[0]), mvcSatMatrix.T[2] ,bins = [massB,concB])
dataVC = n.histogram2d(n.log10(mvcSatMatrix.T[1]), mvcSatMatrix.T[2] , bins = [vcirB,concB])
f = open(join(self.wdir,self.boxDir,"properties",name+ "_MVRmatrixSatellite_" +str(countFileSat)+ ".pkl"),'w')
cPickle.dump([nnM,nnV,nnC,dataMC,dataVC],f)
f.close()
def computeMassVelocityPeakAccRateFunctions(self,ii) :
"""
DO NOT USE
computes the mass, velocity and concentration histograms for a rockstar snapshot.
:param ii: index of the snapshot in the list self.snl()
DO NOT USE
"""
massB = n.arange(8,16,0.01)
vcirB = n.arange(0,4.5,0.01)
concB = n.arange(-5e4,5e4+1,1e3)
NperBatch = 10000000
mvcCentralMatrix = n.empty((NperBatch,3)) # 1M matrixes
mvcSatMatrix = n.empty((NperBatch,3)) # 1 M matrixes
fl = fileinput.input(self.snl[ii])
name = self.snl[ii].split('/')[-1][:-5]
countCen,countSat,countFileCen,countFileSat = 0,0,0,0
for line in fl:
if line[0] == "#" :
continue
line = line.split()
sat_or_cen = float(line[5])
if sat_or_cen != -1 :
countSat+= 1
#print mvcSatMatrix[countSat]
#print line[59], line[61], line[67]
mvcSatMatrix[countSat] = float(line[59]), float(line[61]), float(line[67]) # check the right indices ... MASS velocity concentration
if sat_or_cen == -1 :
countCen+= 1
#print mvcCentralMatrix[countCen]
#print line[59], line[61], line[67]
mvcCentralMatrix[countCen] = float(line[59]), float(line[61]), float(line[67]) # check the right indices ... MASS velocity concentration
if countCen == NperBatch-1 :
nnM,bb = n.histogram(n.log10(mvcCentralMatrix.T[0]),bins = massB)
nnV,bb = n.histogram(n.log10(mvcCentralMatrix.T[1]),bins = vcirB)
nnC,bb = n.histogram(n.log10(mvcCentralMatrix.T[2]),bins = concB)
dataMC = n.histogram2d(n.log10(mvcCentralMatrix.T[0]), mvcCentralMatrix.T[2] ,bins = [massB,concB])
dataVC = n.histogram2d(n.log10(mvcCentralMatrix.T[1]), mvcCentralMatrix.T[2] , bins = [vcirB,concB])
print "countCen",countCen
f = open(join(self.wdir,self.boxDir,"properties", name+"_MVAmatrixCentral_" +str(countFileCen)+ ".pkl"),'w')
cPickle.dump([nnM,nnV,nnC,dataMC,dataVC],f)
f.close()
countFileCen+= 1
countCen = 0
if countSat == NperBatch-1 :
nnM,bb = n.histogram(n.log10(mvcSatMatrix.T[0]),bins = massB)
nnV,bb = n.histogram(n.log10(mvcSatMatrix.T[1]),bins = vcirB)
nnC,bb = n.histogram(n.log10(mvcSatMatrix.T[2]),bins = concB)
dataMC = n.histogram2d(n.log10(mvcSatMatrix.T[0]), mvcSatMatrix.T[2] ,bins = [massB,concB])
dataVC = n.histogram2d(n.log10(mvcSatMatrix.T[1]), mvcSatMatrix.T[2] , bins = [vcirB,concB])
print "countSat", countSat
f = open(join(self.wdir,self.boxDir ,"properties" ,
name+"_MVAmatrixSatellite_" +str(countFileSat)+ ".pkl"),'w')
cPickle.dump([nnM,nnV,nnC,dataMC,dataVC],f)
f.close()
countFileSat+= 1
countSat = 0
# and for the last batch :
nnM,bb = n.histogram(n.log10(mvcCentralMatrix.T[0]),bins = massB)
nnV,bb = n.histogram(n.log10(mvcCentralMatrix.T[1]),bins = vcirB)
nnC,bb = n.histogram(n.log10(mvcCentralMatrix.T[2]),bins = concB)
dataMC = n.histogram2d(n.log10(mvcCentralMatrix.T[0]), mvcCentralMatrix.T[2] ,bins = [massB,concB])
dataVC = n.histogram2d(n.log10(mvcCentralMatrix.T[1]), mvcCentralMatrix.T[2] , bins = [vcirB,concB])
f = open(join(self.wdir,self.boxDir,"properties",name+ "_MVAmatrixCentral_" +str(countFileCen)+ ".pkl"),'w')
cPickle.dump([nnM,nnV,nnC,dataMC,dataVC],f)
f.close()
nnM,bb = n.histogram(n.log10(mvcSatMatrix.T[0]),bins = massB)
nnV,bb = n.histogram(n.log10(mvcSatMatrix.T[1]),bins = vcirB)
nnC,bb = n.histogram(n.log10(mvcSatMatrix.T[2]),bins = concB)
dataMC = n.histogram2d(n.log10(mvcSatMatrix.T[0]), mvcSatMatrix.T[2] ,bins = [massB,concB])
dataVC = n.histogram2d(n.log10(mvcSatMatrix.T[1]), mvcSatMatrix.T[2] , bins = [vcirB,concB])
f = open(join(self.wdir,self.boxDir,"properties",name+ "_MVAmatrixSatellite_" +str(countFileSat)+ ".pkl"),'w')
cPickle.dump([nnM,nnV,nnC,dataMC,dataVC],f)
f.close()
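# Hedged usage sketch: the paths, box name and binning below are illustrative assumptions.
#
# box = MultiDarkSimulation(
#     Lbox=1000.0 * uu.Mpc,
#     wdir="/data2/DATA/eBOSS/Multidark-lightcones/",
#     boxDir="MD_1Gpc_new_rockS",
#     snl=n.array(glob.glob("/data2/DATA/eBOSS/Multidark-lightcones/MD_1Gpc_new_rockS/snapshots/hlist_*.list")),
# )
# vbins = n.arange(0, 3.5, 0.01)  # log10(vmax) bins
# box.computeSingleDistributionFunction(0, 'vmax', vbins)   # histogram the first snapshot
# box.combinesSingleDistributionFunction(0, name='vmax', bins=vbins, type="Central")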
|
#!/usr/bin/env python3
#
# Copyright (c) 2020 Fondazione Bruno Kessler
# Author(s): Cristina Costa (ccosta@fbk.eu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""LoRaWAN End Device Handlers."""
# TODO ADD REFERENCE TO ALLOWED DEVICES
import empower.managers.apimanager.apimanager as apimanager
from empower.managers.lommmanager.datatypes.eui64 import EUI64
class LEndDevsHandler(apimanager.EmpowerAPIHandler):
"""Handler for accessing LoRaWAN End Devices."""
URLS = [r"/api/v1/lns/lenddevs/?",
r"/api/v1/lns/lenddevs/([a-zA-Z0-9:]*)/?"]
@apimanager.validate(max_args=1)
def get(self, *args, **kwargs):
"""List LoRaWAN End devices.
Args:
[0]: devEUI (optional)
Example URLs:
GET /api/v1/lns/lenddevs
[
{
"devEUI": "0028A154B89172D2"
"devAddr": "0028A154B89172D2",
"desc": "End Device XXX"
}
]
GET /api/v1/lns/lenddevs/00:28:A1:54:B8:91:72:D2
{
"devAddr": "0028A154B89172D2",
"desc": "End Device XXX"
}
"""
if not args:
out = []
for key in self.service.lenddevs:
out.append(self.service.lenddevs[key].to_dict())
return out
else:
try:
devEUI = EUI64(args[0]).eui
except ValueError as err:
self.set_status(400)
self.finish({"status_code":400,"title":"devEUI wrong format","detail":str(err)})
return self.service.lenddevs[devEUI].to_dict()
@apimanager.validate(returncode=201, min_args=0, max_args=0)
def post(self, *args, **kwargs):
"""Add a new LoRaWAN end device.
Args:
[0]: devEUI
Request:
version: protocol version (1.0)
desc: a human readable description of the device (optional)
Example URLs:
POST /api/v1/lns/lenddevs/00:28:A1:54:B8:91:72:D2
{
"version":"1.0",
"desc": "LoRaWAN End Device"
"joinEUI": joinEUI
"appKey": cryptographic application key
"nwkKey": cryptographic network key
"appSKey": cryptographic session application key
"nwkSKey": cryptographic session network key
[..]
}
"""
try:
lenddev = self.service.add_lenddev(args[0], **kwargs)
except:
raise
else:
self.set_header("Location", "/api/v1/lns/lenddevs/%s" % lenddev.devEUI)
@apimanager.validate(returncode=204, min_args=0, max_args=1)
def delete(self, *args, **kwargs):
"""Delete one or all LoRaWAN end devices.
Args:
[0]: devEUI
Example URLs:
DELETE /api/v1/lns/lenddevs
DELETE /api/v1/lns/lenddevs/00:28:A1:54:B8:91:72:D2
"""
if args:
self.service.remove_lenddev(EUI64(args[0]))
else:
self.service.remove_all_lenddevs()
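# Hedged usage sketch: exercising the endpoints documented above from Python. The host,
# port and device EUI are illustrative assumptions.
#
# import requests
# base = "http://127.0.0.1:8888/api/v1/lns/lenddevs"
# requests.get(base)                                             # list all end devices
# requests.post(base + "/00:28:A1:54:B8:91:72:D2",
#               json={"version": "1.0", "desc": "LoRaWAN End Device"})
# requests.delete(base + "/00:28:A1:54:B8:91:72:D2")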
|
from time import time
import numpy as np
import biorbd_casadi as biorbd
from bioptim import (
PlotType,
InitialGuess,
InterpolationType,
PenaltyNode,
Node,
OptimalControlProgram,
ConstraintList,
ConstraintFcn,
ObjectiveFcn,
ObjectiveList,
DynamicsList,
DynamicsFcn,
BiMappingList,
BoundsList,
QAndQDotBounds,
InitialGuessList,
PhaseTransitionList,
PhaseTransitionFcn,
Axis,
)
from casadi import vertcat, if_else, lt
def com_dot_z(nodes: PenaltyNode):
nlp = nodes.nlp
x = nodes.x
q = nlp.mapping["q"].to_second.map(x[0][: nlp.shape["q"]])
qdot = nlp.mapping["q"].to_second.map(x[0][nlp.shape["q"]:])
com_dot_func = biorbd.to_casadi_func("Compute_CoM_dot", nlp.model.CoMdot, nlp.q, nlp.qdot)
com_dot = com_dot_func(q, qdot)
return com_dot[2]
def tau_actuator_constraints(nodes: PenaltyNode, minimal_tau=None):
nlp = nodes.nlp
nq = nlp.mapping["q"].to_first.len
q = [nlp.mapping["q"].to_second.map(mx[:nq]) for mx in nodes.x]
qdot = [nlp.mapping["qdot"].to_second.map(mx[nq:]) for mx in nodes.x]
min_bound = []
max_bound = []
func = biorbd.to_casadi_func("torqueMax", nlp.model.torqueMax, nlp.q, nlp.qdot)
for i in range(len(nodes.u)):
bound = func(q[i], qdot[i])
if minimal_tau:
min_bound.append(
nlp.mapping["tau"].to_first.map(if_else(lt(bound[:, 1], minimal_tau), minimal_tau, bound[:, 1]))
)
max_bound.append(
nlp.mapping["tau"].to_first.map(if_else(lt(bound[:, 0], minimal_tau), minimal_tau, bound[:, 0]))
)
else:
min_bound.append(nlp.mapping["tau"].to_first.map(bound[:, 1]))
max_bound.append(nlp.mapping["tau"].to_first.map(bound[:, 0]))
obj = vertcat(*nodes.u)
min_bound = vertcat(*min_bound)
max_bound = vertcat(*max_bound)
return (
vertcat(np.zeros(min_bound.shape), np.ones(max_bound.shape) * -np.inf),
vertcat(obj + min_bound, obj - max_bound),
vertcat(np.ones(min_bound.shape) * np.inf, np.zeros(max_bound.shape)),
)
def prepare_ocp(
ns,
init=None,
biorbd_model_1contact: str = "jumper1contactsutil.bioMod", # ../../optimization_biodbO
biorbd_model_0contact: str = "jumper0contactsutil.bioMod",
) -> OptimalControlProgram:
# Model path
biorbd_model = (
biorbd.Model(biorbd_model_1contact),
biorbd.Model(biorbd_model_0contact),
)
# Problem parameters
n_shooting = (30, 30)
final_time = (1, 1)
time_min = 0.1, 0.2
time_max = 2, 3
n_phases = len(biorbd_model) # number of models in biorbd_model
tau_min, tau_max, tau_init = -1000, 1000, 0
n_q, n_qdot, n_tau = biorbd_model[0].nbQ(), biorbd_model[0].nbQdot(), biorbd_model[0].nbGeneralizedTorque()
mapping = BiMappingList()
# mapping.add("q", [0, 1, 2, 3, 4, 5, None, 6, None, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
# [0, 1, 2, 3, 4, 5, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18])
mapping.add("tau", [None, None, None, None, None, None, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], #relie au u_bounds
[6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]) # comme on n'a pas écrit la phase c'est par defaut la phase 0 qui est rempli donc la phase 1 n'existe pas
# dof0: pelvis translation x # dof4: pelvis rotation y # dof8: trunk rotation z  # dof12: left arm rotation x   # dof16: left thigh rotation x
# dof1: pelvis translation y # dof5: pelvis rotation z # dof9: right arm rotation z # dof13: right thigh rotation x # dof17: left shank rotation x
# dof2: pelvis translation z # dof6: trunk rotation x  # dof10: right arm rotation x # dof14: right shank rotation x # dof18: left foot rotation x
# dof3: pelvis rotation x # dof7: trunk rotation y  # dof11: left arm rotation z # dof15: right foot rotation x
# Add objective functions
objective_functions = ObjectiveList()
objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key="tau", weight=100, phase=0)
objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key="tau", weight=100, phase=1)
# Dynamics
dynamics = DynamicsList()
dynamics.add(DynamicsFcn.TORQUE_DRIVEN, with_contact=True) # Toe only phase 0
dynamics.add(DynamicsFcn.TORQUE_DRIVEN) # Aerial phase phase 1
# Floor constraints
# Do not pull on floor
constraints = ConstraintList()
# Positivity of CoM_dot on z axis prior the take-off
constraints.add(com_dot_z, phase=0, node=Node.END, min_bound=0, max_bound=np.inf)  # CoM vertical velocity must be positive at take-off
# Maximize the jump height
objective_functions = ObjectiveList()
objective_functions.add(ObjectiveFcn.Mayer.MINIMIZE_PREDICTED_COM_HEIGHT, weight=-100, phase=1)
for p in range(0, n_phases):
objective_functions.add(
ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, weight=0.1, phase=p, index=range(7, 19)  # exclude the root segment dofs
)
# Minimize time of the phase
objective_functions.add(
ObjectiveFcn.Mayer.MINIMIZE_TIME, weight=0.1, phase=0, min_bound=time_min[0], max_bound=time_max[0]
)
constraints.add(ConstraintFcn.TRACK_SEGMENT_WITH_CUSTOM_RT, node=Node.ALL, segment="seg_rt", rt=0)
constraints.add(ConstraintFcn.TRACK_CONTACT_FORCES, phase=0, node=Node.ALL, contact_force_idx=1, max_bound=np.inf)
# Non-slipping constraints
constraints.add( # toe only, on only one of the feet
ConstraintFcn.NON_SLIPPING,
phase=0,
node=Node.ALL,
normal_component_idx=1,
tangential_component_idx=0,
static_friction_coefficient=0.5,
)
# Torque constrained to torqueMax
constraints.add(tau_actuator_constraints, phase=0, node=Node.ALL, minimal_tau=20)
constraints.add(tau_actuator_constraints, phase=1, node=Node.ALL, minimal_tau=20)
# Path constraint
x_bounds = BoundsList()
x_bounds.add(bounds=QAndQDotBounds(biorbd_model[0])) # dof_mappings=mapping[0]
x_bounds.add(bounds=QAndQDotBounds(biorbd_model[1]))  # dof_mappings=mapping[0], not 1, even though this is for phase 1
pose_at_first_node = [0] * biorbd_model[0].nbQ()
x_bounds[0].min[:, 0] = pose_at_first_node + [0] * n_qdot
x_bounds[0].max[:, 0] = pose_at_first_node + [0] * n_qdot
u_bounds = BoundsList()
u_bounds.add([-500] * len(mapping[0]["tau"].to_first), [500] * len(mapping[0]["tau"].to_first))  # gives 13 values  # u_bounds.add([-500] * n_tau, [500] * n_tau)
u_bounds.add([-500] * len(mapping[0]["tau"].to_first), [500] * len(mapping[0]["tau"].to_first))  # no phase was specified for the mapping, so phase 0 is filled by default
# Initial guess
x_init = InitialGuessList()
u_init = InitialGuessList()
# x_init.add([0] * (biorbd_model[0].nbQ() + biorbd_model[0].nbQdot()))
# x_init.add([1] * (biorbd_model[1].nbQ() + biorbd_model[1].nbQdot()))
# # Initial guesses
# x_init.add(pose_at_first_node + [0] * n_qdot)
# x_init.add(pose_at_first_node + [1] * n_qdot)
# x_init.add(InitialGuess(np.random.random((n_q + n_qdot, ns[0] + 1)), interpolation=InterpolationType.EACH_FRAME))
# x_init.add(InitialGuess(np.random.random((n_q + n_qdot, ns[0] + 1)), interpolation=InterpolationType.EACH_FRAME))
x_init.add(InitialGuess(pose_at_first_node + [0] * n_qdot, interpolation=InterpolationType.CONSTANT))
x_init.add(InitialGuess(pose_at_first_node + [0] * n_qdot, interpolation=InterpolationType.CONSTANT))  # a list of zeros, not phase [0]
if init is not None:
u_init.add(init)  # with the warm start
else:
# u_init.add([0] * len(mapping[0]["tau"])) #u_init.add([1] * n_tau)
# u_init.add([1] * len(mapping[0]["tau"]))
u_init.add([0] * len(mapping[0]["tau"].to_first))
u_init.add([1] * len(mapping[0]["tau"].to_first))
# Phase transition
phase_transitions = PhaseTransitionList()
phase_transitions.add(PhaseTransitionFcn.CONTINUOUS, phase_pre_idx=0)
objective_functions = ObjectiveList()
objective_functions.add(
ObjectiveFcn.Mayer.TRACK_STATE, phase=1, node=Node.END, index=2, target=np.ones((1, 1)) * -1  # ends at -1 m
)
return OptimalControlProgram(
biorbd_model,
dynamics,
n_shooting,
final_time,
x_init,
u_init,
x_bounds,
u_bounds,
n_threads=4,
variable_mappings=mapping,
)
# Run optimizations
tic = time()
ocp = prepare_ocp(ns=(60,50),
biorbd_model_1contact="jumper1contactsutil.bioMod",
biorbd_model_0contact="jumper1contactsutil.bioMod")
# --- Solve the ocp --- #
sol = ocp.solve(show_online_optim=True, solver_options={'ipopt.max_iter': 3})  # normally 'ipopt.max_iter': 1
sol.graphs()
# --- Show the results in a bioviz animation --- #
sol.animate()
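# The script records `tic` above but never reports it; this line is added as a convenience
# to print the total wall-clock time of the run.
print(f"Total runtime: {time() - tic:.1f} s")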
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
from functools import partial
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import def_function
from tensorflow.python.eager.backprop import GradientTape
from tensorflow.python.ipu.config import IPUConfig
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
from tensorflow.python import keras
from tensorflow.python import ipu
from tensorflow.python.ipu import loops
from tensorflow.compiler.plugin.poplar.tests import test_utils as tu
from tensorflow.python.ipu import ipu_strategy
from tensorflow.python.ops import math_ops
from tensorflow.python.ipu.keras.optimizers import AutomaticLossScalingOptimizer
from tensorflow.python.ipu.keras.optimizers import CrossReplicaOptimizer
from tensorflow.python.ipu.keras.optimizers import MapGradientOptimizerInvertedChaining as MGOIC
from tensorflow.python.ipu.keras.optimizers import IpuOptimizer
from tensorflow.python.framework.constant_op import constant as tf_constant
from tensorflow.python.ipu.keras.optimizers import GradientAccumulationOptimizer
from tensorflow.python.keras.optimizer_v2 import gradient_descent
NUM_IPUS = 2
SGD_LEARNING_RATE = 0.01
NUM_SAMPLES = 128
INITIAL_WEIGHT_VALUE = 5.0
NUM_DENSE_UNITS = 1
DATA_VALUE = 2.0
def data_fn():
return [np.full((NUM_SAMPLES, 1), DATA_VALUE, np.single)] * 2
def map_fn_quadratic(grad, _):
return math_ops.square(grad)
def map_fn_add(grad, _):
h = tf_constant(([10.0]))
return math_ops.add(grad, h)
def map_fn_divide(grad, _):
h = tf_constant([2.0])
return math_ops.divide(grad, h)
def sgd():
return gradient_descent.SGD(SGD_LEARNING_RATE)
def cross_replica_opt_fn():
return CrossReplicaOptimizer(sgd())
def mgoic_opt_fn(f):
return MGOIC(sgd(), f)
def als_opt_fn():
return AutomaticLossScalingOptimizer(sgd())
def dense_layer_fn():
return keras.layers.Dense(
NUM_DENSE_UNITS,
use_bias=False,
kernel_initializer=keras.initializers.Constant(INITIAL_WEIGHT_VALUE))
def sequential_model_fn(optimizer_fn, num_update_steps=1):
m = keras.Sequential([dense_layer_fn()])
l = keras.losses.MeanSquaredError(reduction="sum")
m.compile(loss=l,
optimizer=optimizer_fn(),
steps_per_execution=num_update_steps)
return m
TEST_CASES = [{
'testcase_name': 'CrossReplicaOptimizer',
'optimizer_fn': cross_replica_opt_fn,
}, {
'testcase_name': 'MGOICAdd',
'optimizer_fn': partial(mgoic_opt_fn, map_fn_add),
}, {
'testcase_name': 'AutomaticLossScalingOptimizer',
'optimizer_fn': als_opt_fn,
}]
class KerasV2OptimizersTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def get_model_weight(self, m):
w = m.get_weights()
self.assertEqual(len(w), 1)
w = w[0]
return w.item()
def verify_loss_decreases(self, losses):
self.assertGreater(len(losses), 1)
losses.reverse()
last_loss = losses[0]
for l in losses[1:]:
self.assertLess(l, last_loss)
# The cross replica optimizer is used specifically on IPUs to sum gradients
# across the replicas. This should produce the exact same result as simply
# summing across the batch with the unadjusted optimizer
@unittest.skip("Test does not pass internally.")
@tu.test_uses_ipus(num_ipus=NUM_IPUS, allow_ipu_model=False)
@test_util.run_v2_only
def testCrossReplicaOptimizer(self):
cfg = IPUConfig()
cfg.auto_select_ipus = NUM_IPUS
tu.add_hw_ci_connection_options(cfg)
cfg.configure_ipu_system()
strategy = ipu_strategy.IPUStrategyV1()
steps = 10
batch_size = 2
with strategy.scope():
m = sequential_model_fn(cross_replica_opt_fn, 2)
m.fit(*data_fn(), steps_per_epoch=steps, epochs=1, batch_size=batch_size)
cpu_model = sequential_model_fn(cross_replica_opt_fn)
cpu_model.fit(*data_fn(),
steps_per_epoch=steps / NUM_IPUS,
epochs=1,
batch_size=NUM_IPUS * batch_size)
self.assertEqual(m.get_weights(), cpu_model.get_weights())
@test_util.run_v2_only
def testMapGradientOptimizer(self):
for quad_optimizer in [
lambda: IpuOptimizer(MGOIC(sgd(), map_fn_quadratic)),
lambda: MGOIC(sgd(), map_fn_quadratic)
]:
cfg = IPUConfig()
cfg.auto_select_ipus = 1
tu.add_hw_ci_connection_options(cfg)
cfg.configure_ipu_system()
strategy = ipu_strategy.IPUStrategyV1()
with strategy.scope():
m = sequential_model_fn(quad_optimizer)
m.fit(*data_fn(),
steps_per_epoch=1,
epochs=1,
batch_size=1,
verbose=False)
grad = (2 * DATA_VALUE * ((INITIAL_WEIGHT_VALUE * DATA_VALUE) -
(DATA_VALUE)))
expected = INITIAL_WEIGHT_VALUE - (SGD_LEARNING_RATE * (grad**2))
self.assertAllCloseAccordingToType(self.get_model_weight(m), expected)
@test_util.run_v2_only
def testMapGradientOptimizerNested(self):
for quad_optimizer in [
lambda: MGOIC(MGOIC(sgd(), map_fn_add), map_fn_quadratic)
]:
cfg = IPUConfig()
cfg.auto_select_ipus = 1
tu.add_hw_ci_connection_options(cfg)
cfg.configure_ipu_system()
strategy = ipu_strategy.IPUStrategyV1()
with strategy.scope():
m = sequential_model_fn(quad_optimizer)
m.fit(*data_fn(),
steps_per_epoch=1,
epochs=1,
batch_size=1,
verbose=False)
grad = (2 * DATA_VALUE * ((INITIAL_WEIGHT_VALUE * DATA_VALUE) -
(DATA_VALUE)))
expected = INITIAL_WEIGHT_VALUE - (SGD_LEARNING_RATE *
((grad**2) + 10))
self.assertAllCloseAccordingToType(self.get_model_weight(m), expected)
@unittest.skip("T42094 - MapGradientOptimizer needs fixing.")
@tu.test_uses_ipus(num_ipus=NUM_IPUS, allow_ipu_model=False)
@test_util.run_v2_only
def testMappedAndCross(self):
# test that _keras optimizer wrapper still works with default optimizers
add_optimizer = CrossReplicaOptimizer(MGOIC(sgd(), map_fn_add))
cfg = IPUConfig()
cfg.auto_select_ipus = NUM_IPUS
tu.add_hw_ci_connection_options(cfg)
cfg.configure_ipu_system()
strategy = ipu_strategy.IPUStrategyV1()
with strategy.scope():
m = sequential_model_fn(lambda: add_optimizer, 2)
m.fit(*data_fn(), steps_per_epoch=2, epochs=1, batch_size=1)
#grad = (2 * DATA_VALUE * ((INITIAL_WEIGHT_VALUE * DATA_VALUE) - (DATA_VALUE)))
#expected = INITIAL_WEIGHT_VALUE - (SGD_LEARNING_RATE * (grad + 10))
# re enable when T36442 is fixed
#self.assertAllCloseAccordingToType(self.get_model_weight(m), expected)
@parameterized.named_parameters(*TEST_CASES)
@test_util.run_v2_only
def testCreateFromConfig(self, optimizer_fn, **kwargs): # pylint: disable=unused-argument
opt_1 = optimizer_fn()
opt_1_config = opt_1.get_config()
opt_2_config = opt_1_config.copy()
opt_2_config['name'] += "_copy"
opt_2 = opt_1.__class__.from_config(opt_2_config)
self.assertEqual(opt_2.get_config(), opt_2_config)
@parameterized.named_parameters(*TEST_CASES)
@test_util.run_v2_only
def testWeightsPropertyRead(self, optimizer_fn, **kwargs): # pylint: disable=unused-argument
opt = optimizer_fn()
w = opt.weights
opt.set_weights(2 * w)
self.assertEqual(opt.weights, 2 * w)
@parameterized.named_parameters(*TEST_CASES)
@test_util.run_v2_only
def testWeightsPropertyWrite(self, optimizer_fn, **kwargs): # pylint: disable=unused-argument
opt = optimizer_fn()
with self.assertRaisesRegex(AttributeError, "can't set attribute"):
opt.weights = 1
@parameterized.named_parameters(*TEST_CASES)
@test_util.run_v2_only
def testClipnormProperty(self, optimizer_fn, **kwargs): # pylint: disable=unused-argument
opt = optimizer_fn()
if not opt.clipnorm:
opt.clipnorm = 1
clip_norm_val = opt.clipnorm
opt.clipnorm = 2 * clip_norm_val
self.assertEqual(opt.clipnorm, 2 * clip_norm_val)
@parameterized.named_parameters(*TEST_CASES)
@test_util.run_v2_only
def testGlobalClipnormProperty(self, optimizer_fn, **kwargs): # pylint: disable=unused-argument
opt = optimizer_fn()
if not opt.global_clipnorm:
opt.global_clipnorm = 1
clip_norm_val = opt.global_clipnorm
opt.global_clipnorm = 2 * clip_norm_val
self.assertEqual(opt.global_clipnorm, 2 * clip_norm_val)
@parameterized.named_parameters(*TEST_CASES)
@test_util.run_v2_only
def testClipvalueProperty(self, optimizer_fn, **kwargs): # pylint: disable=unused-argument
opt = optimizer_fn()
if not opt.clipvalue:
opt.clipvalue = 1
clip_val = opt.clipvalue
opt.clipvalue = 2 * clip_val
self.assertEqual(opt.clipvalue, 2 * clip_val)
@parameterized.named_parameters(*TEST_CASES)
@test_util.run_v2_only
def testVariablesMethod(self, optimizer_fn, **kwargs): # pylint: disable=unused-argument
opt = optimizer_fn()
self.assertEqual(opt.get_weights(), opt.variables())
@parameterized.named_parameters(*TEST_CASES)
@test_util.run_v2_only
def testGetSetWeights(self, optimizer_fn, **kwargs): # pylint: disable=unused-argument
opt_1 = optimizer_fn()
opt_2 = optimizer_fn()
opt_2.set_weights([w * 2 for w in opt_1.get_weights()])
for a, b in zip(opt_1.get_weights(), opt_2.get_weights()):
self.assertEqual(b, 2 * a)
@parameterized.named_parameters(*TEST_CASES)
@test_util.run_v2_only
def testMinimizeWithGradientTape(self, optimizer_fn, num_update_steps=1):
cfg = IPUConfig()
cfg.auto_select_ipus = 1
tu.add_hw_ci_connection_options(cfg)
cfg.configure_ipu_system()
strategy = ipu_strategy.IPUStrategy()
with strategy.scope():
layer = dense_layer_fn()
optimizer = optimizer_fn()
loss = keras.losses.MeanSquaredError(reduction="sum")
@def_function.function(experimental_compile=True)
def f(a, t, _):
with GradientTape() as tape:
z = layer(a)
l = loss(z, t)
def ll():
return l
optimizer.minimize(ll, layer.trainable_variables, tape=tape)
return a, t, l
@def_function.function(experimental_compile=True)
def g(a, t):
_, _, l = loops.repeat(num_update_steps, f, inputs=[a, t, 0.0])
return l
losses = [strategy.run(g, args=data_fn()) for _ in range(3)]
self.verify_loss_decreases(losses)
@parameterized.named_parameters(*TEST_CASES)
@test_util.run_v2_only
def testMinimizeWithoutGradientTape(self, optimizer_fn, num_update_steps=1):
cfg = IPUConfig()
cfg.auto_select_ipus = 1
tu.add_hw_ci_connection_options(cfg)
cfg.configure_ipu_system()
strategy = ipu_strategy.IPUStrategy()
with strategy.scope():
layer = dense_layer_fn()
optimizer = optimizer_fn()
loss = keras.losses.MeanSquaredError(reduction="sum")
@def_function.function(experimental_compile=True)
def f(a, t, _):
def l():
z = layer(a)
return loss(z, t)
ll = l()
optimizer.minimize(l, layer.trainable_variables)
return a, t, ll
@def_function.function(experimental_compile=True)
def g(a, t):
_, _, l = loops.repeat(num_update_steps, f, inputs=[a, t, 0.0])
return l
losses = [strategy.run(g, args=data_fn()) for _ in range(3)]
self.verify_loss_decreases(losses)
@parameterized.named_parameters(*TEST_CASES)
@test_util.run_v2_only
def testKerasSequentialModelTrain(self, optimizer_fn, num_update_steps=1):
cfg = IPUConfig()
cfg.auto_select_ipus = 1
tu.add_hw_ci_connection_options(cfg)
cfg.configure_ipu_system()
strategy = ipu_strategy.IPUStrategy()
with strategy.scope():
m = sequential_model_fn(optimizer_fn, num_update_steps)
history = m.fit(*data_fn(), epochs=3, verbose=False)
losses = [l for l in history.history['loss']]
self.verify_loss_decreases(losses)
@parameterized.named_parameters(*TEST_CASES)
@test_util.run_v2_only
def testKerasFunctionalModelTrain(self, optimizer_fn, num_update_steps=1):
cfg = IPUConfig()
cfg.auto_select_ipus = 1
tu.add_hw_ci_connection_options(cfg)
cfg.configure_ipu_system()
strategy = ipu_strategy.IPUStrategy()
with strategy.scope():
input_layer = keras.layers.Input(1)
x = dense_layer_fn()(input_layer)
l = keras.losses.MeanSquaredError(reduction="sum")
m = keras.Model(inputs=input_layer, outputs=x)
m.compile(loss=l,
optimizer=optimizer_fn(),
steps_per_execution=num_update_steps)
history = m.fit(*data_fn(), epochs=3, verbose=False)
losses = [l for l in history.history['loss']]
self.verify_loss_decreases(losses)
@parameterized.named_parameters(*TEST_CASES)
@test_util.run_v2_only
def testKerasSequentialPipelineTrain(self, optimizer_fn, **kwargs): # pylint: disable=unused-argument
if isinstance(optimizer_fn(), GradientAccumulationOptimizer):
return
cfg = IPUConfig()
cfg.auto_select_ipus = 2
tu.add_hw_ci_connection_options(cfg)
cfg.configure_ipu_system()
strategy = ipu_strategy.IPUStrategy()
with strategy.scope():
m = keras.Sequential([
dense_layer_fn(), # Stage 0
dense_layer_fn(), # Stage 0.
dense_layer_fn(), # Stage 1.
dense_layer_fn(), # Stage 1.
])
m.set_pipelining_options(gradient_accumulation_steps_per_replica=4,
experimental_normalize_gradients=True)
m.set_pipeline_stage_assignment([0, 0, 1, 1])
m.compile(optimizer_fn(), loss='mse', steps_per_execution=8)
history = m.fit(*data_fn(), epochs=3, verbose=False)
losses = [l for l in history.history['loss']]
self.verify_loss_decreases(losses)
@parameterized.named_parameters(*TEST_CASES)
@test_util.run_v2_only
def testKerasFunctionalPipelineTrain(self, optimizer_fn, **kwargs): # pylint: disable=unused-argument
if isinstance(optimizer_fn(), GradientAccumulationOptimizer):
return
cfg = IPUConfig()
cfg.auto_select_ipus = 2
tu.add_hw_ci_connection_options(cfg)
cfg.configure_ipu_system()
strategy = ipu_strategy.IPUStrategy()
with strategy.scope():
input_layer = keras.layers.Input(1)
with ipu.keras.PipelineStage(0):
x = dense_layer_fn()(input_layer)
x = dense_layer_fn()(x)
with ipu.keras.PipelineStage(1):
x = dense_layer_fn()(x)
x = dense_layer_fn()(x)
m = keras.Model(inputs=input_layer, outputs=x)
m.set_pipelining_options(gradient_accumulation_steps_per_replica=4,
experimental_normalize_gradients=True)
m.compile(optimizer_fn(), loss='mse', steps_per_execution=8)
history = m.fit(*data_fn(), epochs=3, verbose=False)
losses = [l for l in history.history['loss']]
self.verify_loss_decreases(losses)
if __name__ == "__main__":
test.main()
|
import pickle
import tensorflow as tf
from tensorflow.keras import backend
class OptimizerState:
def __init__(self, optimizer):
self.config = optimizer.get_config()
self.weights = optimizer.get_weights()
# self.iterations = states.iterations
self.lr = backend.get_value(optimizer.lr)
self.iterations = backend.get_value(optimizer.iterations)
self.learning_rate = backend.get_value(optimizer.learning_rate)
def save(self, path):
with open(path, 'wb') as f:
pickle.dump(self, f)
@staticmethod
def load(path):
with open(path, 'rb') as f:
state = pickle.load(f)
return state
def empty_optimizer(self):
if self.config["name"] == "Adam":
return tf.keras.optimizers.Adam(
**self.config
)
raise Exception(f"Optimizer {self.config['name']} is not Supported!")
def apply_weights(self, model, optimizer):
grad_vars = model.trainable_weights
zero_grads = [tf.zeros_like(w) for w in grad_vars]
optimizer.apply_gradients(zip(zero_grads, grad_vars))
optimizer.set_weights(self.weights)
def apply(self, model):
optimizer = self.empty_optimizer()
self.apply_weights(model, optimizer)
backend.set_value(optimizer.lr, self.lr)
backend.set_value(optimizer.iterations, self.iterations)
backend.set_value(optimizer.learning_rate, self.learning_rate)
model.optimizer = optimizer
def __eq__(self, other):
if not isinstance(other, OptimizerState):
return False
for attribute in self.__dict__.keys():
if attribute == "weights":
continue
if getattr(self, attribute) != getattr(other, attribute):
return False
if len(self.weights) != len(other.weights):
return False
# weights_equal = tf.math.reduce_all( #<- memory issues on colab
# [tf.math.reduce_all(a == b) for a, b in zip(self.weights, other.weights)]
# )
weights_equal = all(
map(lambda xy: tf.math.reduce_all(xy[0] == xy[1]).numpy(), zip(self.weights, other.weights)))
return weights_equal
def __str__(self):
lines = ["{"]
for k, v in self.__dict__.items():
if k == "weights":
line = f"\t{k}:\t{str(type(v))},"
else:
line = f"\t{k}:\t{v},"
lines.append(line)
lines.append("}")
return "\n".join(lines)
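# A minimal round-trip sketch for OptimizerState (added for illustration).
# `build_model` is assumed to return a freshly compiled tf.keras model (Adam
# optimizer, weights already built); `x`/`y` are matching training arrays and
# the .pkl path is illustrative only.
def _example_round_trip(build_model, x, y, path="optimizer_state.pkl"):
    model = build_model()
    model.fit(x, y, epochs=1, verbose=0)
    OptimizerState(model.optimizer).save(path)  # persist slots, lr and iteration count
    restored = build_model()                    # e.g. in a new process
    state = OptimizerState.load(path)
    state.apply(restored)                       # attach an Adam carrying the saved state
    return restored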
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from odahuflow.sdk.models.base_model_ import Model
from odahuflow.sdk.models import util
class RemoteModelSource(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, model_connection: str=None, model_path: str=None): # noqa: E501
"""RemoteModelSource - a model defined in Swagger
:param model_connection: The model_connection of this RemoteModelSource. # noqa: E501
:type model_connection: str
:param model_path: The model_path of this RemoteModelSource. # noqa: E501
:type model_path: str
"""
self.swagger_types = {
'model_connection': str,
'model_path': str
}
self.attribute_map = {
'model_connection': 'modelConnection',
'model_path': 'modelPath'
}
self._model_connection = model_connection
self._model_path = model_path
@classmethod
def from_dict(cls, dikt) -> 'RemoteModelSource':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The RemoteModelSource of this RemoteModelSource. # noqa: E501
:rtype: RemoteModelSource
"""
return util.deserialize_model(dikt, cls)
@property
def model_connection(self) -> str:
"""Gets the model_connection of this RemoteModelSource.
ModelConnection is name of connection to object storage bucket where ML model files are expected # noqa: E501
:return: The model_connection of this RemoteModelSource.
:rtype: str
"""
return self._model_connection
@model_connection.setter
def model_connection(self, model_connection: str):
"""Sets the model_connection of this RemoteModelSource.
ModelConnection is name of connection to object storage bucket where ML model files are expected # noqa: E501
:param model_connection: The model_connection of this RemoteModelSource.
:type model_connection: str
"""
self._model_connection = model_connection
@property
def model_path(self) -> str:
"""Gets the model_path of this RemoteModelSource.
ModelPath is a directory inside ModelConnection where ML model files are located # noqa: E501
:return: The model_path of this RemoteModelSource.
:rtype: str
"""
return self._model_path
@model_path.setter
def model_path(self, model_path: str):
"""Sets the model_path of this RemoteModelSource.
ModelPath is a directory inside ModelConnection where ML model files are located # noqa: E501
:param model_path: The model_path of this RemoteModelSource.
:type model_path: str
"""
self._model_path = model_path
|
# coding: utf8
"""
Tests of High Jump / Pole Vault competition logic
"""
from unittest import TestCase, main
from decimal import Decimal
from athlib.highjump import HighJumpCompetition, _012
from athlib.exceptions import RuleViolation
ESAA_2015_HJ = [
# English Schools Senior Boys 2015 - epic jumpoff ending in a draw
# We did not include all other jumpers
# See http://www.esaa.net/v2/2015/tf/national/results/fcards/tf15-sb-field.pdf
# and http://www.englandathletics.org/england-athletics-news/great-action-at-the-english-schools-aa-championships
["place", "order", "bib", "first_name", "last_name", "team", "category",
"1.81", "1.86", "1.91", "1.97", "2.00", "2.03", "2.06", "2.09", "2.12", "2.12", "2.10", "2.12", "2.10", "2.12"],
["", 1, '85', "Harry", "Maslen", "WYork", "SB",
"o", "o", "o", "xo", "xxx"],
["", 2, '77', "Jake", "Field", "Surrey", "SB",
"xxx"],
["1", 4, '53', "William", "Grimsey", "Midd", "SB",
"", "", "", "o", "o", "o", "o", "o", "xxx", "x", "o", "x", "o", "x"],
["1", 5, '81', "Rory", "Dwyer", "Warks", "SB",
"", "", "", "o", "o", "o", "o", "o", "xxx", "x", "o", "x", "o", "x"]
]
_1066 = [
#based on above, but we have a winner
["place", "order", "bib", "first_name", "last_name", "team", "category",
"1.81", "1.86", "1.91", "1.97", "2.00", "2.03", "2.06", "2.09", "2.12", "2.12", "2.10", "2.12", "2.10", "2.12", "2.11"],
["", 1, '85', "Dafydd", "Briton", "WYork", "SB",
"o", "o", "o", "xo", "xxx"],
["", 2, '77', "Jake", "Saxon", "Surrey", "SB",
"xxx"],
["1", 4, '53', "William", "Norman", "Midd", "SB",
"", "", "", "o", "o", "o", "o", "o", "xxx", "x", "o", "x", "o", "x", "x"],
["1", 5, '81', "Harald", "England", "Warks", "SB",
"", "", "", "o", "o", "o", "o", "o", "xxx", "x", "o", "x", "o", "x", "o"]
]
RIO_MENS_HJ = [ # pasted from Wikipedia
["place", "order", "bib", "first_name", "last_name", "team", "category", "2.20", "2.25", "2.29", "2.33", "2.36", "2.38", "2.40", "best", "note"],
["1", 7, 2197, "Derek", "Drouin", "CAN", "M", "o", "o", "o", "o", "o", "o", "x", 2.38, ""],
["2", 9, 2878, "Mutaz", "Essa Barshim", "QAT", "M", "o", "o", "o", "o", "o", "xxx", "", 2.36, ""],
["3", 3, 3026, "Bohdan", "Bondarenko", "UKR", "M", "-", "o", "-", "o", "-", "xx-", "x", 2.33, ""],
["4=", 8, 2456, "Robert", "Grabarz", "GBR", "M", "o", "xo", "o", "o", "xxx", "", "", 2.33, "=SB"],
["4=", 15, 3032, "Andriy", "Protsenko", "UKR", "M", "o", "o", "xo", "o", "xxx", "", "", 2.33, "SB"],
["6", 6, 3084, "Erik", "Kynard", "USA", "M", "o", "xo", "o", "xxo", "xxx", "", "", 2.33, ""],
["7=", 5, 2961, "Majededdin", "Ghazal", "SYR", "M", "o", "o", "o", "xxx", "", "", "", 2.29, ""],
["7=", 12, 2294, "Kyriakos", "Ioannou", "CYP", "M", "o", "o", "o", "xxx", "", "", "", 2.29, ""],
["7=", 13, 2076, "Donald", "Thomas", "BAH", "M", "o", "o", "o", "xxx", "", "", "", 2.29, ""],
["10", 1, 2182, "Tihomir", "Ivanov", "BUL", "M", "o", "xo", "o", "xxx", "", "", "", 2.29, "=PB"],
["11", 10, 2062, "Trevor", "Barry", "BAH", "M", "o", "o", "xxx", "", "", "", "", 2.25, ""],
["12", 4, 2293, "Dimitrios", "Chondrokoukis", "M", "CYP", "xo", "o", "xxx", "", "", "", "", 2.25, ""],
["13", 11, 2871, "Luis", "Castro", "PUR", "M", "o", "xxo", "xxx", "", "", "", "", 2.25, ""],
["14", 14, 2297, "Jaroslav", "Bába", "CZE", "M", "o", "xxx", "", "", "", "", "", 2.2, ""],
["15", 2, 2052, "Brandon", "Starc", "AUS", "M", "xo", "xxx", "", "", "", "", "", 2.2, ""]
]
class HighJumpTests(TestCase):
def test_competition_setup(self):
"""Tests basic creation of athletes with names and bibs"""
c = HighJumpCompetition.from_matrix(ESAA_2015_HJ, to_nth_height=0)
self.assertEqual("Dwyer", c.jumpers[-1].last_name)
self.assertEqual("Maslen", c.jumpers_by_bib['85'].last_name)
def test_progression(self):
c = HighJumpCompetition.from_matrix(ESAA_2015_HJ, to_nth_height=0)
h1 = Decimal("1.81")
c.set_bar_height(h1)
# round 1
c.cleared('85')
j = c.jumpers_by_bib['85']
self.assertEqual(j.attempts_by_height, ['o'])
self.assertEqual(j.highest_cleared, h1)
c.failed('77')
c.failed('77')
c.failed('77')
jake_field = c.jumpers_by_bib['77']
self.assertEqual(jake_field.highest_cleared, Decimal("0.00"))
self.assertEqual(jake_field.attempts_by_height, ['xxx'])
self.assertTrue(jake_field.eliminated)
harry_maslen = c.jumpers_by_bib['85']
# attempt at fourth jump should fail
self.assertRaises(RuleViolation, c.failed, '77')
self.assertEqual(jake_field.place, 4)
self.assertEqual(harry_maslen.place, 1)
def test_replay_to_jumpoff(self):
"Run through to where the jumpoff began - ninth bar position"
c = HighJumpCompetition.from_matrix(ESAA_2015_HJ, to_nth_height=9)
# see who is winning
maslen = c.jumpers_by_bib['85']
field = c.jumpers_by_bib['77']
grimsey = c.jumpers_by_bib['53']
dwyer = c.jumpers_by_bib['81']
self.assertEqual(field.place, 4)
self.assertEqual(maslen.place, 3)
self.assertEqual(grimsey.place, 1)
self.assertEqual(dwyer.place, 1)
# print "after 2:12 round"
# print grimsey.failures_at_height
# print grimsey.consecutive_failures
# print grimsey.attempts_by_height
# if not for jump-off rules, it would be game over
self.assertEqual(len(c.remaining), 2)
self.assertEqual(c.state, 'jumpoff')
def test_replay_through_jumpoff(self):
"Run through a jumpoff to a draw"
c = HighJumpCompetition.from_matrix(ESAA_2015_HJ)
self.assertRaises(RuleViolation,c.failed,'53')
# see who is winning
maslen = c.jumpers_by_bib['85']
field = c.jumpers_by_bib['77']
grimsey = c.jumpers_by_bib['53']
dwyer = c.jumpers_by_bib['81']
self.assertEqual(field.place, 4)
self.assertEqual(maslen.place, 3)
self.assertEqual(grimsey.place, 1)
self.assertEqual(dwyer.place, 1)
self.assertEqual(len(c.remaining), 2)
self.assertEqual(c.state, 'jumpoff')
def test_replay_jumpoff_and_finish(self):
"Run through a jumpoff to the final winner"
c = HighJumpCompetition.from_matrix(_1066)
self.assertRaises(RuleViolation,c.failed,'53')
self.assertRaises(RuleViolation,c.failed,'81')
# see who is winning
briton = c.jumpers_by_bib['85']
saxon = c.jumpers_by_bib['77']
norman = c.jumpers_by_bib['53']
england = c.jumpers_by_bib['81']
self.assertEqual(saxon.place, 4)
self.assertEqual(briton.place, 3)
self.assertEqual(norman.place, 2)
self.assertEqual(england.place, 1)
self.assertEqual(len(c.remaining), 1)
self.assertEqual(c.state, 'finished')
self.assertEqual(england.highest_cleared, Decimal("2.11"))
self.assertRaises(RuleViolation,c.set_bar_height, Decimal("2.12"))
def test_countback_to_tie(self):
"Run both fail, but tie countback wins"
c = HighJumpCompetition.from_matrix(
[
["place", "order", "bib", "first_name", "last_name", "2.06", "2.08", "2.10", "2.12", "2.14"],
["", 1, 'A', "Harald", "England", "o", "o", "xo", "xo", "xxx"],
["", 2, 'B', "William", "Norman", "o", "o", "o", "xxo", "xxx"],
]
)
self.assertRaises(RuleViolation,c.failed,'A')
self.assertRaises(RuleViolation,c.failed,'B')
# see who is winning
A = c.jumpers_by_bib['A']
B = c.jumpers_by_bib['B']
self.assertEqual(A.place, 1)
self.assertEqual(B.place, 2)
self.assertEqual(len(c.remaining), 0)
self.assertEqual(c.state, 'finished')
self.assertEqual(A.highest_cleared, Decimal("2.12"))
self.assertEqual(B.highest_cleared, Decimal("2.12"))
self.assertEqual(A.ranking_key,(0, Decimal('-2.12'), 1, 2))
self.assertEqual(B.ranking_key,(0, Decimal('-2.12'), 2, 2))
def test_countback_total_failure_rank(self):
"test_countback_total_failure_rank"
c = HighJumpCompetition.from_matrix(
[
["place", "order", "bib", "first_name", "last_name", "2.06", "2.08"],
["", 1, 'A', "Harald", "England", "o", "o"],
["", 2, 'B', "William", "Norman", "xxx"],
]
)
self.assertRaises(RuleViolation,c.failed,'B')
# see who is winning
A = c.jumpers_by_bib['A']
B = c.jumpers_by_bib['B']
self.assertEqual(A.place, 1)
self.assertEqual(B.place, 2)
self.assertEqual(len(c.remaining), 1)
self.assertEqual(c.state, 'won')
self.assertEqual(A.highest_cleared, Decimal("2.08"))
self.assertEqual(B.highest_cleared, Decimal("0.00"))
self.assertEqual(A.ranking_key,(0, Decimal('-2.08'), 0, 0))
self.assertEqual(B.ranking_key,(2, Decimal('0.00'), 0, 0))
def test_countback_to_total_failures(self):
"test_countback_to_total_failures"
c = HighJumpCompetition.from_matrix(
[
["place", "order", "bib", "first_name", "last_name", "2.06", "2.08", "2.10", "2.12", "2.14"],
["", 1, 'A', "Harald", "England", "o", "o", "xo", "xo", "xxx"],
["", 2, 'B', "William", "Norman", "o", "xo", "xo", "xo", "xxx"],
]
)
self.assertRaises(RuleViolation,c.failed,'A')
self.assertRaises(RuleViolation,c.failed,'B')
# see who is winning
A = c.jumpers_by_bib['A']
B = c.jumpers_by_bib['B']
self.assertEqual(A.place, 1)
self.assertEqual(B.place, 2)
self.assertEqual(len(c.remaining), 0)
self.assertEqual(c.state, 'finished')
self.assertEqual(A.highest_cleared, Decimal("2.12"))
self.assertEqual(B.highest_cleared, Decimal("2.12"))
self.assertEqual(A.ranking_key,(0, Decimal('-2.12'), 1, 2))
self.assertEqual(B.ranking_key,(0, Decimal('-2.12'), 1, 3))
def test_won_ending(self):
"check the status changes at a won ending which finishes"
mx = [
["place", "order", "bib", "first_name", "last_name", "team", "category"],
["1", 1, '53', "William", "Norman", "Midd", "SB"],
["1", 2, '81', "Harald", "England", "Warks", "SB"],
]
c = HighJumpCompetition.from_matrix(mx)
self.assertEqual(c.state,'scheduled')
self.assertEqual(len(c.remaining),2)
for height,perfs,xstate,lenremj in (
(2.11,("o","o"),'started',2),
(2.12,("o","o"),'started',2),
(2.13,("o","o"),'started',2),
(2.14,("xxx","o"),'won',1),
(2.16,("","o"),'won',1),
(2.17,("","xxo"),'won',1),
(2.18,("","xxx"),'finished',0)):
c.set_bar_height(height)
for i in _012:
for j,p in enumerate(perfs):
if len(p)<i+1: continue
c.bib_trial(mx[1+j][2],p[i])
self.assertEqual(c.state,xstate,"height=%s expected state %s not %s" % (height,xstate,c.state))
self.assertEqual(len(c.remaining),lenremj,"height=%s expected lenremj %s not %s" % (height,lenremj,len(c.remaining)))
def test_score_olympic_final(self):
"Do we get the same results as the Olympics?"
c = HighJumpCompetition.from_matrix(RIO_MENS_HJ, verbose=False)
        # all the positions given in the matrix should agree with the computed ones
for row in RIO_MENS_HJ[1:]:
place, order, bib = row[0:3]
expected_place = int(place.replace('=', ''))
jumper = c.jumpers_by_bib[str(bib)]
actual_place = jumper.place
self.assertEqual(actual_place, expected_place)
def test_dismissed(self):
c = HighJumpCompetition()
c.add_jumper(bib='A',first_name='Harald',last_name='England')
c.add_jumper(bib='B',first_name='William',last_name='Norman')
self.assertRaises(RuleViolation,c.cleared,'A')
self.assertRaises(RuleViolation,c.passed,'A')
self.assertRaises(RuleViolation,c.failed,'A')
self.assertRaises(RuleViolation,c.retired,'A')
c.set_bar_height(Decimal('2.00'))
A=c.jumpers_by_bib['A']
B=c.jumpers_by_bib['B']
self.assertEqual(A.dismissed,False)
self.assertEqual(B.dismissed,False)
c.cleared('A')
c.passed('B')
self.assertEqual(A.dismissed,True)
self.assertEqual(B.dismissed,True)
c.set_bar_height(Decimal('2.02'))
self.assertEqual(A.dismissed,False)
self.assertEqual(B.dismissed,False)
c.cleared('A')
c.failed('B')
self.assertEqual(A.dismissed,True)
self.assertEqual(B.dismissed,False)
c.passed('B')
self.assertEqual(B.dismissed,True)
def test_trials(self):
c = HighJumpCompetition()
c.add_jumper(bib='A',first_name='Harald',last_name='England')
c.add_jumper(bib='B',first_name='William',last_name='Norman')
h1 = Decimal('1.10')
h2 = Decimal('1.15')
h3 = Decimal('1.14')
c.set_bar_height(h1)
self.assertEqual(c.state,'started','state should be started')
self.assertEqual(c.is_finished,False,'not finished')
self.assertEqual(c.is_running,True,'is running')
c.cleared('A')
c.cleared('B')
c.set_bar_height(h2)
c.failed('A')
c.failed('B')
c.failed('A')
c.failed('B')
c.failed('A')
c.failed('B')
self.assertEqual(c.state,'jumpoff','jumpoff state should be reached')
self.assertEqual(c.is_finished, False,"jumpoff competition is not finished")
self.assertEqual(c.is_running, True,"jumpoff competition is running")
c.set_bar_height(h3)
self.assertEqual(c.trials,[('A',h1,'o'),('B',h1,'o'),('A',h2,'x'),('B',h2,'x'),('A',h2,'x'),('B',h2,'x'),('A',h2,'x'),('B',h2,'x')],'trials in jumpoff state')
c.failed('A')
self.assertEqual(c.state,'jumpoff','still in jumpoff after A fails at 1.14')
self.assertEqual(c.trials,[('A',h1,'o'),('B',h1,'o'),('A',h2,'x'),('B',h2,'x'),('A',h2,'x'),('B',h2,'x'),('A',h2,'x'),('B',h2,'x'),('A',h3,'x')], 'trials after A fails at 1.14')
c.cleared('B')
self.assertEqual(c.state,'finished','state finished after B clears at 1.14')
fal = [('A',h1,'o'),('B',h1,'o'),('A',h2,'x'),('B',h2,'x'),('A',h2,'x'),('B',h2,'x'),('A',h2,'x'),('B',h2,'x'),('A',h3,'x'),('B',h3,'o')]
fadl = [dict(bib=a[0],height=a[1],result=a[2]) for a in fal]
self.assertEqual(c.trials, fal, 'final trials')
self.assertEqual(c.trial_objs, fadl, 'final trial_objs')
self.assertEqual(c.from_actions().trials, fal, 'd.from_actions().trials should match c.trials')
self.assertEqual(c.from_actions().trial_objs, fadl, 'd.from_actions().trial_objs should match c.trial_objs')
def test_action_letter(self):
c = HighJumpCompetition()
self.assertEqual(c.action_letter['cleared'],'o',"action_letter['cleared']=='o'")
self.assertEqual(c.action_letter['failed'],'x',"action_letter['failed']=='x'")
self.assertEqual(c.action_letter['passed'],'-',"action_letter['passed']=='-'")
self.assertEqual(c.action_letter['retired'],'r',"action_letter['passed']=='r'")
actions_a = [["add_jumper",dict(bib='A')], ["add_jumper",dict(bib='B')], ["set_bar_height",1.1], ["cleared","A"], ["cleared","B"],
["set_bar_height",1.2], ["failed","A"], ["failed","B"], ["failed","A"], ["failed","B"], ["failed","A"], ["failed","B"],
["set_bar_height",1.15], ["cleared","A"], ["cleared","B"], ["set_bar_height",1.17], ["failed","A"], ["failed","B"],
["set_bar_height",1.16], ["retired","A"], ["retired","B"]]
matrix_a = [
["bib", "1.10", "1.20", "1.15", "1.17", "1.16"],
["A", "o", "xxx", "o", "x", "r"],
["B", "o", "xxx", "o", "x", "r"],
]
def test_retire_after_jumpoff(self):
c = HighJumpCompetition().from_actions(self.actions_a)
self.assertEqual(c.state,'drawn','both retiring after jumpoffs should draw')
c = HighJumpCompetition().from_matrix(self.matrix_a)
self.assertEqual(c.state,'drawn','both retiring after jumpoffs should draw')
self.assertEqual(c.to_matrix(),self.matrix_a,'matrix round trip should match')
self.assertEqual(c.is_finished, True,"competition is finished")
self.assertEqual(c.is_running, False,"competition is not running")
def test_rieto_pv(self):
c = HighJumpCompetition.from_matrix(
[_.split() for _ in '''bib 3.00 3.20 3.40 3.60 3.70 3.80 3.85 3.90 3.95 4.00
QF000595 - - o o - o - o xxx
EH004164 - - - xo xo o o xxo xxx
JA112119 - - o xo o xxx
CB064342 - - o xo o xxx
FC009594 - - - o o x- o xx- x
HC000711 - - o o - xxx
CF058632 - - xo xo xxx
GL001818 xo o xxx
EC001108 o xo o xxx
VA008725 o o xxo xxx
JE001383 o o xxx
CG000293 o xo xxx
BC000303 o xo xxx
EE010870 - - o o xxo o xx- x
EE006186 xo xo xxx
JC003084 o xxx
EF007915 - - o o xo o xxo xo xxo xxx
GL000737 o xxx
DA011840 o o xxx
CK006373 xo xxx
GJ001614 xo xxx
ED000485 x- xx
JA103141 xxx'''.split('\n')]
, verbose=False)
self.assertEqual(c.state,'finished','One winning jumper failed at his chosen height')
self.assertEqual(c.jumpers_by_bib['EF007915'].place,1,'EF007915 came first')
if __name__ == '__main__':
main()
|
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import logging
import storops.unity.resource.cifs_share
import storops.unity.resource.nas_server
import storops.unity.resource.nfs_share
import storops.unity.resource.pool
from storops.exception import UnityResourceNotFoundError, \
UnityCifsServiceNotEnabledError, UnityShareShrinkSizeTooLargeError, \
UnityShareShrinkSizeTooSmallError, UnityLocalReplicationFsNameNotSameError
from storops.lib.common import supplement_filesystem
from storops.lib.version import version
from storops.unity.enums import FSSupportedProtocolEnum, TieringPolicyEnum, \
SnapStateEnum, AccessPolicyEnum, FSLockingPolicyEnum
from storops.unity.resource import UnityResource, UnityResourceList
from storops.unity.resource.replication_session import \
UnityReplicationSession, UnityResourceConfig
from storops.unity.resource.snap import UnitySnap, UnitySnapList
from storops.unity.resource.storage_resource import UnityStorageResource
from storops.unity.resp import RestResponse
from storops.unity.client import UnityClient
__author__ = 'Jay Xu'
log = logging.getLogger(__name__)
class UnityFileSystem(UnityResource):
@classmethod
def create(cls, cli, pool, nas_server, name, size, proto=None,
is_thin=None, tiering_policy=None, user_cap=False,
is_compression=None, access_policy=None,
locking_policy=None, description=None):
pool_clz = storops.unity.resource.pool.UnityPool
nas_server_clz = storops.unity.resource.nas_server.UnityNasServer
if proto is None:
proto = FSSupportedProtocolEnum.NFS
pool = pool_clz.get(cli, pool)
nas_server = nas_server_clz.get(cli, nas_server)
size = supplement_filesystem(size, user_cap)
fs_param = cls.prepare_fs_parameters(
pool=pool, nas_server=nas_server,
supported_protocols=proto,
is_thin_enabled=is_thin,
size=size,
tiering_policy=tiering_policy,
is_compression=is_compression,
access_policy=access_policy,
locking_policy=locking_policy)
req_body = cli.make_body(allow_empty=True, name=name,
description=description,
fsParameters=fs_param)
resp = cli.type_action(UnityStorageResource().resource_class,
'createFilesystem',
**req_body)
resp.raise_if_err()
sr = UnityStorageResource(_id=resp.resource_id, cli=cli)
return sr.filesystem
def modify(self, size=None, is_thin=None, tiering_policy=None,
user_cap=False, is_compression=None, access_policy=None,
locking_policy=None, description=None,
cifs_fs_parameters=None):
sr = self.storage_resource
if sr is None:
raise ValueError('storage resource for filesystem {} not found.'
.format(self.name))
if size:
size = supplement_filesystem(size, user_cap)
fs_param = self.prepare_fs_parameters(
is_thin_enabled=is_thin, size=size,
tiering_policy=tiering_policy,
is_compression=is_compression,
access_policy=access_policy,
locking_policy=locking_policy)
params = {}
if fs_param:
params['fsParameters'] = fs_param
if cifs_fs_parameters:
params['cifsFsParameters'] = cifs_fs_parameters
if description is not None:
params['description'] = description
if not params:
return RestResponse('', self._cli)
req_body = self._cli.make_body(allow_empty=True, **params)
resp = sr.modify_fs(**req_body)
resp.raise_if_err()
return resp
@property
def first_available_cifs_server(self):
ret = None
if self.nas_server is not None:
try:
ret = self.nas_server.get_cifs_server()
except UnityCifsServiceNotEnabledError as e:
log.info(e.message)
return ret
def delete(self, force_snap_delete=False, force_vvol_delete=False,
async_mode=False):
sr = self.storage_resource
if not self.existed or sr is None:
raise UnityResourceNotFoundError(
'cannot find filesystem {}.'.format(self.get_id()))
resp = self._cli.delete(sr.resource_class,
sr.get_id(),
forceSnapDeletion=force_snap_delete,
forceVvolDeletion=force_vvol_delete,
async_mode=async_mode)
resp.raise_if_err()
return resp
def extend(self, new_size, user_cap=False):
sr = self.storage_resource
new_size = supplement_filesystem(new_size, user_cap)
param = self._cli.make_body(size=new_size)
resp = sr.modify_fs(fsParameters=param)
resp.raise_if_err()
return resp
def shrink(self, new_size, user_cap=False):
sr = self.storage_resource
new_size = supplement_filesystem(new_size, user_cap)
size_used = sr.size_used
if size_used and int(size_used) > new_size:
message = 'Reject shrink share request, ' \
'the new size should be larger than used.'
raise UnityShareShrinkSizeTooSmallError(message)
param = self._cli.make_body(size=new_size)
size_total = sr.size_total
if size_total and int(size_total) < new_size:
message = 'Reject shrink share request, ' \
'the new size should be smaller than original.'
raise UnityShareShrinkSizeTooLargeError(message)
resp = sr.modify_fs(fsParameters=param)
resp.raise_if_err()
return resp
def create_nfs_share(self, name, path=None, share_access=None,
min_security=None, no_access_hosts=None,
read_only_hosts=None, read_write_hosts=None,
root_access_hosts=None,
read_only_root_access_hosts=None,
no_access_hosts_string=None,
read_only_hosts_string=None,
read_write_hosts_string=None,
read_only_root_hosts_string=None,
root_access_hosts_string=None,
anonymous_uid=None, anonymous_gid=None,
export_option=None, description=None):
clz = storops.unity.resource.nfs_share.UnityNfsShare
return clz.create(
self._cli, name=name, fs=self,
path=path, share_access=share_access,
min_security=min_security,
no_access_hosts=no_access_hosts,
read_only_hosts=read_only_hosts,
read_write_hosts=read_write_hosts,
root_access_hosts=root_access_hosts,
read_only_root_access_hosts=read_only_root_access_hosts,
no_access_hosts_string=no_access_hosts_string,
read_only_hosts_string=read_only_hosts_string,
read_write_hosts_string=read_write_hosts_string,
read_only_root_hosts_string=read_only_root_hosts_string,
root_access_hosts_string=root_access_hosts_string,
anonymous_uid=anonymous_uid,
anonymous_gid=anonymous_gid,
export_option=export_option,
description=description)
def create_cifs_share(self, name, path=None, cifs_server=None,
is_read_only=None, is_encryption_enabled=None,
is_con_avail_enabled=None, is_abe_enabled=None,
is_branch_cache_enabled=None,
offline_availability=None,
umask=None, description=None):
clz = storops.unity.resource.cifs_share.UnityCifsShare
return clz.create(
self._cli, name=name, fs=self,
path=path, cifs_server=cifs_server,
is_read_only=is_read_only,
is_encryption_enabled=is_encryption_enabled,
is_con_avail_enabled=is_con_avail_enabled,
is_abe_enabled=is_abe_enabled,
is_branch_cache_enabled=is_branch_cache_enabled,
offline_availability=offline_availability,
umask=umask, description=description)
def create_snap(self, name=None,
description=None, is_auto_delete=None,
retention_duration=None, is_read_only=None,
fs_access_type=None):
return UnitySnap.create(cli=self._cli,
storage_resource=self.storage_resource,
name=name,
description=description,
is_auto_delete=is_auto_delete,
retention_duration=retention_duration,
is_read_only=is_read_only,
fs_access_type=fs_access_type)
@property
def snapshots(self):
return UnitySnapList(cli=self._cli,
storage_resource=self.storage_resource)
def has_snap(self, ignore_system_snap=False):
""" This method won't count the snaps in "destroying" state!
:param ignore_system_snap: ignore the system snap if True.
:return: false if no snaps or all snaps are destroying.
"""
snaps = filter(lambda s: s.state != SnapStateEnum.DESTROYING,
self.snapshots)
if ignore_system_snap:
snaps = filter(lambda s: not s.is_system_snap, snaps)
return len(list(snaps)) > 0
def replicate_with_dst_resource_provisioning(self, max_time_out_of_sync,
dst_pool_id,
dst_fs_name=None,
remote_system=None,
replication_name=None,
dst_size=None,
is_dst_thin=None,
dst_tiering_policy=None,
is_dst_compression=None):
"""
Creates a replication session with destination filesystem provisioning.
:param max_time_out_of_sync: maximum time to wait before syncing the
source and destination. Value `-1` means the automatic sync is not
performed. `0` means it is a sync replication.
:param dst_pool_id: id of pool to allocate destination filesystem.
:param dst_fs_name: destination filesystem name. If `remote_system` is
`None` (for local replication creation), `dst_fs_name` should be
same as the source fs name or `None`.
:param remote_system: `UnityRemoteSystem` object. The remote system to
which the replication is being configured. When not specified, it
defaults to local system.
:param replication_name: replication name.
:param dst_size: destination filesystem size.
:param is_dst_thin: indicates whether destination filesystem is thin or
not.
:param dst_tiering_policy: `TieringPolicyEnum` value. Tiering policy of
destination filesystem.
:param is_dst_compression: indicates whether destination filesystem is
compression enabled or not.
:return: created replication session.
"""
if dst_fs_name is None:
dst_fs_name = self.name
if remote_system is None and dst_fs_name != self.name:
raise UnityLocalReplicationFsNameNotSameError(
'dst_fs_name passed in for creating filesystem local '
'replication should be same as source filesystem name '
'or None')
dst_size = self.size_total if dst_size is None else dst_size
dst_resource = UnityResourceConfig.to_embedded(
name=dst_fs_name, pool_id=dst_pool_id, size=dst_size,
tiering_policy=dst_tiering_policy, is_thin_enabled=is_dst_thin,
is_compression_enabled=is_dst_compression)
return UnityReplicationSession.create_with_dst_resource_provisioning(
self._cli, self.storage_resource.get_id(),
dst_resource, max_time_out_of_sync,
remote_system=remote_system, name=replication_name)
@staticmethod
def prepare_fs_parameters(**kwargs):
@version('<4.3')
def make_compression_body(is_compression=None):
return UnityClient.make_body(
allow_empty=True, isCompressionEnabled=is_compression)
@version('>=4.3') # noqa
def make_compression_body(is_compression=None):
return UnityClient.make_body(
allow_empty=True, isDataReductionEnabled=is_compression)
access_policy = kwargs.get('access_policy')
locking_policy = kwargs.get('locking_policy')
supported_protocols = kwargs.get('supported_protocols')
tiering_policy = kwargs.get('tiering_policy')
AccessPolicyEnum.verify(access_policy)
FSSupportedProtocolEnum.verify(supported_protocols)
FSLockingPolicyEnum.verify(locking_policy)
TieringPolicyEnum.verify(tiering_policy)
fs_param = UnityClient.make_body(
allow_empty=True,
pool=kwargs.get('pool'),
nasServer=kwargs.get('nas_server'),
supportedProtocols=supported_protocols,
isThinEnabled=kwargs.get('is_thin_enabled'),
size=kwargs.get('size'),
accessPolicy=access_policy,
lockingPolicy=locking_policy)
if tiering_policy:
fs_param['fastVPParameters'] = UnityClient.make_body(
allow_empty=True, tieringPolicy=tiering_policy)
compression_body = make_compression_body(kwargs.get('is_compression'))
fs_param.update(compression_body)
return fs_param
@staticmethod
def prepare_cifs_fs_parameters(
is_cifs_sync_writes_enabled=None,
is_cifs_op_locks_enabled=None,
is_cifs_notify_on_write_enabled=None,
is_cifs_notify_on_access_enabled=None,
cifs_notify_on_change_dir_depth=None):
return UnityClient.make_body(
allow_empty=True,
isCIFSSyncWritesEnabled=is_cifs_sync_writes_enabled,
isCIFSOpLocksEnabled=is_cifs_op_locks_enabled,
isCIFSNotifyOnWriteEnabled=is_cifs_notify_on_write_enabled,
isCIFSNotifyOnAccessEnabled=is_cifs_notify_on_access_enabled,
cifsNotifyOnChangeDirDepth=cifs_notify_on_change_dir_depth)
class UnityFileSystemList(UnityResourceList):
@classmethod
def get_resource_class(cls):
return UnityFileSystem
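# A minimal usage sketch (added for illustration, not part of storops). It assumes
# `cli` is an authenticated UnityClient and that 'pool_1' and 'nas_1' are existing
# pool and NAS server ids on the array; all names here are illustrative only.
def _example_provision_filesystem(cli):
    # Create a 10 GiB thin filesystem (proto defaults to NFS inside create()).
    fs = UnityFileSystem.create(cli, 'pool_1', 'nas_1', name='fs_demo',
                                size=10 * 1024 ** 3, is_thin=True)
    fs.create_nfs_share(name='fs_demo_share', path='/')  # export it over NFS
    fs.create_snap(name='fs_demo_initial')               # take an initial snapshot
    return fs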
|
import status
import authstate
class WelcomeState():
def __init__(self, handler):
self.handler = handler
def run(self):
self.handler.send_response(status.OK, "POP3", "I'm a teapot")
return authstate.AuthorizationState(self.handler)
|
import featuretools as ft
import pandas as pd
from unittest.mock import patch
import pytest
import autonormalize as an
def test_ft_mock_customer():
df = ft.demo.load_mock_customer(n_customers=80, n_products=50, n_sessions=200,
n_transactions=10000, return_single_table=True)
entityset = an.auto_entityset(df, name="Customer Transactions", time_index='transaction_time')
assert set(entityset['transaction_id'].columns) == set(['transaction_id', 'session_id', 'transaction_time',
'product_id', 'amount'])
assert set(entityset['product_id'].columns) == set(['product_id', 'brand'])
assert set(entityset['session_id'].columns) == set(['session_id', 'customer_id', 'device', 'session_start'])
assert set(entityset['customer_id'].columns) == set(['customer_id', 'zip_code', 'join_date', 'birthday'])
assert set([str(rel) for rel in entityset.relationships]) == set(['<Relationship: transaction_id.session_id -> session_id.session_id>',
'<Relationship: transaction_id.product_id -> product_id.product_id>',
'<Relationship: session_id.customer_id -> customer_id.customer_id>'])
@patch("autonormalize.autonormalize.auto_entityset")
def test_normalize_entityset(auto_entityset):
df1 = pd.DataFrame({"test": [0, 1, 2]})
df2 = pd.DataFrame({"test": [0, 1, 2]})
accuracy = 0.98
es = ft.EntitySet()
error = "This EntitySet is empty"
with pytest.raises(ValueError, match=error):
an.normalize_entityset(es, accuracy)
es.add_dataframe(df1, "df")
df_out = es.dataframes[0]
an.normalize_entityset(es, accuracy)
auto_entityset.assert_called_with(df_out, accuracy, index=df_out.ww.index, name=es.id, time_index=df_out.ww.time_index)
es.add_dataframe(df2, "df2")
error = "There is more than one dataframe in this EntitySet"
with pytest.raises(ValueError, match=error):
an.normalize_entityset(es, accuracy)
|
#!/usr/bin/env python
# coding:utf-8
"""
Name : check_db_connection.py
Author : Dmitry Kruchinin
Date : 7/1/2021
Desc:
"""
from fixture.orm import ORMFixture
from model.group import Group
db = ORMFixture(host="localhost", database="addressbook", user="root", password="")
try:
groups = db.get_groups_list()
contacts = db.get_contacts_list()
contacts_in_group = db.get_contacts_in_group(Group(id="248"))
contacts_not_in_group = db.get_contacts_not_in_group(Group(id="248"))
print("####### Groups")
for group in groups:
print(group)
print(len(groups))
print("####### Contacts")
for contact in contacts:
print(contact)
print(len(contacts))
print("####### Contacts in group")
for contact_in_group in contacts_in_group:
print(contact_in_group)
print(len(contacts_in_group))
print("####### Contacts NOT in group")
for contact_not_in_group in contacts_not_in_group:
print(contact_not_in_group)
print(len(contacts_not_in_group))
finally:
pass # db.destroy()
|
"""Main module for tests."""
import os
import pandas as pd
import en_core_web_sm
from narcy.nlp.utils import Relation, document_factory
from narcy.processors import reduce_relations
from narcy.processors import doc_to_relations_df, doc_to_svos_df
from narcy.processors import doc_to_tokens_df
_dirpath = os.path.join(os.path.split(__file__)[0], 'data')
def get_docs():
make_doc = document_factory(en_core_web_sm.load())
documents = []
for text in os.listdir(_dirpath):
with open(os.path.join(_dirpath, text)) as stream:
doc = make_doc(stream.read().strip())
documents.append(doc)
return documents
def _test_relations(doc, reduced):
relations = doc._.relations
if reduced:
relations = reduce_relations(relations)
for relation in relations:
assert isinstance(relation, Relation)
def _test_doc_to_relations_df(doc, reduced):
df = doc_to_relations_df(doc, reduced=reduced)
assert isinstance(df, pd.DataFrame)
assert df.shape != (0, 0)
def _test_doc_to_svos_df(doc):
df = doc_to_svos_df(doc)
assert isinstance(df, pd.DataFrame)
assert df.shape != (0, 0)
def _test_doc_to_tokens_df(doc):
df = doc_to_tokens_df(doc)
assert isinstance(df, pd.DataFrame)
assert df.shape != (0, 0)
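# A minimal way to wire the helpers above into an actual pytest test (an added
# sketch, assuming the text files under tests/data and the en_core_web_sm model
# are available; only functions defined in this module are used).
def test_all_documents():
    for doc in get_docs():
        for reduced in (False, True):
            _test_relations(doc, reduced)
            _test_doc_to_relations_df(doc, reduced)
        _test_doc_to_svos_df(doc)
        _test_doc_to_tokens_df(doc)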
|
# SISO program yes.py
# Returns 'yes' for all inputs.
import utils
from utils import rf
def yes(inString):
return "yes"
def testYes():
testVals = [
("", "yes"),
("x", "yes"),
("asdf", "yes"),
("GAGAGAGAGAGA", "yes"),
]
for (inString, solution) in testVals:
val = yes(inString)
utils.tprint(inString, ":", val)
assert val == solution
|
# Returns a valid response when a request has appropriate credentials.
def main(request, response):
cookie = request.cookies.first("cookieName", None)
expected_value = request.GET.first("value", None)
source_origin = request.headers.get("origin", None)
response_headers = [("Content-Type", "text/javascript"),
("Access-Control-Allow-Origin", source_origin),
("Access-Control-Allow-Credentials", "true")]
if cookie == expected_value:
return (200, response_headers, "")
    return (404, response_headers, "")
|
# Copyright 2020 MIT Probabilistic Computing Project.
# See LICENSE.txt
import os
import re
from setuptools import setup
# Specify the requirements.
requirements = {
'src' : [
'astunparse==1.6.3',
'numpy==1.19',
'scipy==1.7.3',
'sympy==1.6',
],
'magics' : [
'graphviz==0.13.2',
'ipython==7.13.0',
'jupyter-core==4.6.3',
'networkx==2.4',
'notebook==6.0.3',
'matplotlib==3.3.2',
'pygraphviz==1.5',
],
'tests' : [
'pytest-timeout==1.3.3',
'pytest==5.2.2',
'coverage==5.3',
]
}
requirements['all'] = [r for v in requirements.values() for r in v]
# Determine the version (hardcoded).
dirname = os.path.dirname(os.path.realpath(__file__))
vre = re.compile('__version__ = \'(.*?)\'')
m = open(os.path.join(dirname, 'src', '__init__.py')).read()
__version__ = vre.findall(m)[0]
setup(
name='sppl',
version=__version__,
description='The Sum-Product Probabilistic Language',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
url='https://github.com/probcomp/sppl',
license='Apache-2.0',
maintainer='Feras A. Saad',
maintainer_email='fsaad@mit.edu',
classifiers=[
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Topic :: Scientific/Engineering :: Mathematics',
],
packages=[
'sppl',
'sppl.compilers',
'sppl.magics',
'sppl.tests',
],
package_dir={
'sppl' : 'src',
'sppl.compilers' : 'src/compilers',
'sppl.magics' : 'magics',
'sppl.tests' : 'tests',
},
install_requires=requirements['src'],
extras_require=requirements,
python_requires='>=3.6',
)
|
#code to get images from open cv into python
#webcam_cnn_pipeline is a local module so export path first
#!export PYTHONPATH="$PYTHONPATH:/Users/alexpapiu/Documents/Conv/OpenCV_CNN"
from webcam_cnn_pipeline import *
LR = 0.0003
#setting up camera:
cp = cv2.VideoCapture(0)
cp.set(3, 256)
cp.set(4, 144)
os.chdir("/Users/alexpapiu/Documents/Data/OpenCV_CNN")
N = 50
#creating datasets:
print("First Label")
X_1 = imgs_to_arr(cp = cp, nr = N, nframe = 5)
time.sleep(5)
print("Second Label")
X_2 = imgs_to_arr(cp = cp, nr = N, nframe = 5)
#X_3 = imgs_to_arr(cp = cp, nr = 200, nframe = 10)
X, y = create_matrices(X_1, X_2)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print("loading previous weights")
model = load_model("basic_model")
model.compile(loss = "binary_crossentropy", optimizer = adam(lr = LR), metrics = ["accuracy"])
X_tr, X_val, y_tr, y_val = train_test_split(X, y, stratify = y, random_state = 3, test_size = 0.15)
print("fine tuning training model")
model.fit(X_tr, y_tr, validation_data = (X_val, y_val), nb_epoch=3, batch_size=32)
model.save_weights("fine_tuned_weights") #save weights only after fine-tuning
#dict for label:
labelz = {0:"Open", 1:"Closed"}
real_time_pred(model, labelz, nframes = 10000)
|
def swap(coords, matrix):
x_1, y_1, x_2, y_2 = map(int, coords)
if 0 <= x_1 < rows_count and 0 <= y_1 < cols_count and 0 <= x_2 < rows_count and 0 <= y_2 < cols_count:
matrix[x_1][y_1], matrix[x_2][y_2] = matrix[x_2][y_2], matrix[x_1][y_1]
[print(" ".join(map(str, x))) for x in matrix]
else:
print("Invalid input!")
matrix = []
rows_count, cols_count = map(int, input().split())
for _ in range(rows_count):
matrix.append([x for x in input().split()])
while True:
command = input()
command = command.split()
if "END" in command:
break
elif "swap" in command and len(command) == 5:
coords = command[1:]
swap(coords, matrix)
else:
print("Invalid input!")
|
# coding: utf-8
# flake8: noqa
"""
Jordskredvarsel API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.0.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from varsom_landslide_client.models.alert import Alert
from varsom_landslide_client.models.alert_info import AlertInfo
from varsom_landslide_client.models.alert_info_area import AlertInfoArea
from varsom_landslide_client.models.alert_info_area_geocode import AlertInfoAreaGeocode
from varsom_landslide_client.models.alert_info_event_code import AlertInfoEventCode
from varsom_landslide_client.models.alert_info_parameter import AlertInfoParameter
from varsom_landslide_client.models.alert_info_resource import AlertInfoResource
from varsom_landslide_client.models.cause import Cause
from varsom_landslide_client.models.code_page_data_item import CodePageDataItem
from varsom_landslide_client.models.county import County
from varsom_landslide_client.models.decoder_fallback import DecoderFallback
from varsom_landslide_client.models.encoder_fallback import EncoderFallback
from varsom_landslide_client.models.encoding import Encoding
from varsom_landslide_client.models.formatted_content_result_alert import FormattedContentResultAlert
from varsom_landslide_client.models.formatted_content_result_list_alert import FormattedContentResultListAlert
from varsom_landslide_client.models.i_required_member_selector import IRequiredMemberSelector
from varsom_landslide_client.models.media_type_formatter import MediaTypeFormatter
from varsom_landslide_client.models.media_type_header_value import MediaTypeHeaderValue
from varsom_landslide_client.models.media_type_mapping import MediaTypeMapping
from varsom_landslide_client.models.micro_blog_post import MicroBlogPost
from varsom_landslide_client.models.municipality import Municipality
from varsom_landslide_client.models.name_value_header_value import NameValueHeaderValue
from varsom_landslide_client.models.station import Station
from varsom_landslide_client.models.warning import Warning
|
from __future__ import print_function
import numpy as np
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.tests.helper import assert_quantity_allclose
from ...core import PixCoord
from ...tests.helpers import make_simple_wcs
from ..annulus import CircleAnnulusPixelRegion, CircleAnnulusSkyRegion
from .test_common import BaseTestPixelRegion, BaseTestSkyRegion
from .utils import ASTROPY_LT_13
class TestCircleAnnulusPixelRegion(BaseTestPixelRegion):
reg = CircleAnnulusPixelRegion(PixCoord(3, 4), inner_radius=2, outer_radius=3)
sample_box = [0, 6, 1, 7]
inside = [(3, 2)]
outside = [(3, 0)]
expected_area = 5 * np.pi
expected_repr = '<CircleAnnulusPixelRegion(PixCoord(x=3, y=4), inner radius=2, outer radius=3)>'
expected_str = 'Region: CircleAnnulusPixelRegion\ncenter: PixCoord(x=3, y=4)\ninner radius: 2\nouter radius: 3'
skycoord = SkyCoord(3 * u.deg, 4 * u.deg, frame='icrs')
wcs = make_simple_wcs(skycoord, 5 * u.arcsec, 20)
def test_init(self):
assert_quantity_allclose(self.reg.center.x, 3)
assert_quantity_allclose(self.reg.center.y, 4)
assert_quantity_allclose(self.reg.inner_radius, 2)
assert_quantity_allclose(self.reg.outer_radius, 3)
def test_transformation(self):
skyannulus = self.reg.to_sky(wcs=self.wcs)
assert isinstance(skyannulus, CircleAnnulusSkyRegion)
class TestCircleAnnulusSkyRegion(BaseTestSkyRegion):
reg = CircleAnnulusSkyRegion(SkyCoord(3 * u.deg, 4 * u.deg), 20 * u.arcsec, 30 * u.arcsec)
skycoord = SkyCoord(3 * u.deg, 4 * u.deg, frame='icrs')
wcs = make_simple_wcs(skycoord, 5 * u.arcsec, 20)
if ASTROPY_LT_13:
expected_repr = ('<CircleAnnulusSkyRegion(<SkyCoord (ICRS): (ra, dec) in '
'deg\n (3.0, 4.0)>, inner radius=20.0 arcsec, outer radius=30.0 arcsec)>')
expected_str = ('Region: CircleAnnulusSkyRegion\ncenter: <SkyCoord (ICRS): '
'(ra, dec) in deg\n (3.0, 4.0)>\ninner radius: 20.0 '
'arcsec\nouter radius: 30.0 arcsec')
else:
expected_repr = ('<CircleAnnulusSkyRegion(<SkyCoord (ICRS): (ra, dec) in '
'deg\n ( 3., 4.)>, inner radius=20.0 arcsec, outer radius=30.0 arcsec)>')
expected_str = ('Region: CircleAnnulusSkyRegion\ncenter: <SkyCoord (ICRS): '
'(ra, dec) in deg\n ( 3., 4.)>\ninner radius: 20.0 '
'arcsec\nouter radius: 30.0 arcsec')
def test_init(self):
assert_quantity_allclose(self.reg.center.ra, self.skycoord.ra)
assert_quantity_allclose(self.reg.inner_radius, 20*u.arcsec)
assert_quantity_allclose(self.reg.outer_radius, 30*u.arcsec)
def test_contains(self):
assert not self.reg.contains(self.skycoord, self.wcs)
test_coord = SkyCoord(3 * u.deg, 10 * u.deg, frame='icrs')
assert not self.reg.contains(test_coord, self.wcs)
def test_transformation(self):
pixannulus = self.reg.to_pixel(wcs=self.wcs)
assert isinstance(pixannulus, CircleAnnulusPixelRegion)
|
# Search the Dice Jobs API
# lame docs: http://www.dice.com/common/content/util/apidoc/jobsearch.html
# author: nxkennedy
'''
Example response:
{"detailUrl":"http://www.dice.com/job/result/10347349a/749028?src\u003d19","jobTitle":"Front-End Web Developer","company":"The Doyle Group","location":"Denver, CO","date":"2017-01-18"}
'''
import requests
import csv
from os.path import exists
from sys import argv
def format_search(terms):
print(terms)
words = [x for x in terms.split(' ') if x]
print(words)
query = 'text=' + '+'.join(words) + '&age=30' + '&sort=1'
print(query)
# age - (optional) specify a posting age (a.k.a. days back)
# sort - (optional) sort=1 sorts by posted age, sort=2 sorts by job title, sort=3 sorts by company, sort=4 sorts by location
baseURL= 'http://service.dice.com/api/rest/jobsearch/v1/simple.json?'
url = baseURL + query
print("\nRequested URL:", url + '\n')
return url
def write_to_file(jobListings):
with open('jobs.csv', 'w') as csvFile:
fieldnames = ['Job Title', 'Company', 'Location', 'Posted', 'URL']
writer = csv.DictWriter(csvFile, fieldnames=fieldnames)
writer.writeheader()
for job in jobListings:
writer.writerow({'Job Title': job['jobTitle'], 'Company': job['company'], 'Location': job['location'], 'Posted': job['date'], 'URL': job['detailUrl']})
print("Finished writing file.")
def search(terms):
response = requests.get(format_search(terms)).json()
rawData = response
print(rawData['count'], 'total results')
print(rawData['lastDocument'], 'results per page')
jobListings = rawData['resultItemList']
write_to_file(jobListings)
if __name__ == '__main__':
s = input('Key word(s) to search?\n> ')
search(s)
|
__author__ = 'fractus.io'
import api.items
|
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.signals import pre_save
from django.utils.text import slugify
from django.utils import timezone
class PostManager(models.Manager):
def active(self, *args, **kwargs):
return super(PostManager, self).filter(
draft=False).filter(publish__lte=timezone.now())
def upload_location(instance, filename):
return "{0}/{1}".format(instance.id, filename)
class Post(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, default=1)
title = models.CharField(max_length=120)
slug = models.SlugField(unique=True, default='abv')
image = models.ImageField(upload_to=upload_location,
null=True,
blank=True,
width_field="width_field",
height_field="height_field")
width_field = models.IntegerField(default=0)
height_field = models.IntegerField(default=0)
content = models.TextField()
draft = models.BooleanField(default=False)
publish = models.DateField(auto_now_add=False, auto_now=False)
updated = models.DateTimeField(auto_now_add=False, auto_now=True)
timestamp = models.DateTimeField(auto_now_add=True, auto_now=False)
objects = PostManager()
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("posts:detail", kwargs={"slug": self.slug})
class Meta:
ordering = ["-timestamp", "-updated"]
def pre_save_slug_post(sender, instance, *args, **kwargs):
slug = slugify(instance.title)
instance.slug = slug
pre_save.connect(pre_save_slug_post, sender=Post)
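# Example queries (added for illustration; the URL name assumes a "posts" namespace):
#   Post.objects.active()    -> non-draft posts whose publish date is not in the future
#   post.get_absolute_url()  -> resolves "posts:detail" for this post's slug
# Slugs are regenerated from the title by pre_save_slug_post just before each save.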
|
from flask import Flask
app = Flask('aone_app')
import aone_app.main
|
from typing import List
class Solution:
    def subsets(self, nums: List[int]) -> List[List[int]]:
        res = []
        def dfs(begin, path):
            # Every prefix built so far is itself a valid subset.
            res.append(path[:])
            for i in range(begin, len(nums)):
                path.append(nums[i])   # choose nums[i]
                dfs(i + 1, path)       # recurse on the remaining elements
                path.pop()             # backtrack
        dfs(0, [])
        print(res)
        return res
Solution().subsets([1, 2, 3])
|
import xmlrpc.client
import xmlrpc
import os
import argparse
import random
def handle(s, btFile, secret):
print('handle bittorrent file: ', str(btFile))
ret=s.aria2.addTorrent('token:'+secret, xmlrpc.client.Binary(open(btFile, mode='rb').read()),[],{'pause':'true'})
print("add bt: ",str(ret))
waiting = s.aria2.tellWaiting('token:'+secret, 0, 1000,
["gid", "totalLength", "completedLength", "uploadSpeed", "downloadSpeed", "connections",
"numSeeders", "seeder", "status", "errorCode", "verifiedLength",
"verifyIntegrityPending", "files", "bittorrent", "infoHash"])
for w in waiting:
gid=w['gid']
if gid!=ret:
continue
#print(w['gid'],w['files'])
# max-selection strategy
maxLen=0
maxFPath=''
maxFIndex='0'
for f in w['files']:
print(f['length'],f['path'])
if int(f['length'])>maxLen:
maxLen=int(f['length'])
maxFPath=f['path']
maxFIndex=f['index']
print('max file: ',str(maxLen),maxFIndex,str(maxFPath))
# max-selection strategy end
cret=s.aria2.changeOption('token:'+secret, gid,{'select-file':maxFIndex})# select multiple files example: 'select-file':'5,6,7,8'
print('select file: ',cret)
tret=s.aria2.tellStatus('token:'+secret, gid)
print('after selection: ', tret['files'][int(maxFIndex)-1])
uret=s.aria2.unpause('token:'+secret, gid)
print('unpause: ',uret)
print('over: ',str(btFile))
os.remove(btFile)
def handleMag(s, mgFile, secret):
print('handle mag file: ', str(mgFile))
if os.path.getsize(mgFile):
ret=s.aria2.addUri('token:'+secret, [xmlrpc.client.Binary(open(mgFile, mode='rb').read())])
print("add mag: ",str(ret))
print("remove mag file: ",str(mgFile))
os.remove(mgFile)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
    parser.description = 'Batch-import torrent files into aria2 and download only the largest file in each torrent'
parser.add_argument("num", help="the num to be added once", type=int)
parser.add_argument("server", help="like: http://192.168.3.99:6800/", type=str)
parser.add_argument("dir", help="the dir of your bittorrents", type=str)
parser.add_argument("mgdir", help="the dir of your magnets", type=str)
parser.add_argument("secret", help="secrets", type=str)
parser.add_argument("name", help="mag name", type=str)
args = parser.parse_args()
s = xmlrpc.client.ServerProxy(args.server+"rpc")
flist=os.listdir(args.dir)
name = args.name
for i in range(0, len(flist)):
if flist[i].endswith(".torrent"):
btFile = os.path.join(args.dir, flist[i])
if os.path.isfile(btFile):
handle(s,btFile,args.secret)
temp = []
for root, dirs, files in os.walk(args.mgdir):
for file in files:
if name != "any" and root.lower().replace('-','').find(name.lower().replace('-','')) == -1:
continue
if file.endswith(".txt"):
temp.append(os.path.join(root, file))
if len(temp) > 0:
dn = args.num
if args.num > len(temp):
dn = len(temp)
for mgFile in random.sample(temp, dn):
print("path:",str(mgFile))
handleMag(s,mgFile,args.secret)
print("Add magnets to air2 Done")
else:
print("No magnets to air2 !!!")
|
import warnings
from typing import Union, Tuple, Dict
import pygame
from pygame_gui.core import ObjectID
from pygame_gui.core.interfaces import IContainerLikeInterface, IUIManagerInterface
from pygame_gui.core import ColourGradient, UIElement
from pygame_gui.core.utility import render_white_text_alpha_black_bg, apply_colour_to_surface
from pygame_gui.core.utility import basic_blit
class UILabel(UIElement):
"""
    A label lets us display a single line of text with a single font style. It is a quick-to-rebuild
    and simple alternative to the text box element.
    :param relative_rect: The rectangle that contains and positions the label relative to its
    container.
:param text: The text to display in the label.
:param manager: The UIManager that manages this label.
:param container: The container that this element is within. If set to None will be the root
window's container.
:param parent_element: The element this element 'belongs to' in the theming hierarchy.
:param object_id: A custom defined ID for fine tuning of theming.
:param anchors: A dictionary describing what this element's relative_rect is relative to.
:param visible: Whether the element is visible by default. Warning - container visibility
may override this.
"""
def __init__(self, relative_rect: pygame.Rect,
text: str,
manager: IUIManagerInterface,
container: Union[IContainerLikeInterface, None] = None,
parent_element: UIElement = None,
object_id: Union[ObjectID, str, None] = None,
anchors: Dict[str, str] = None,
visible: int = 1):
super().__init__(relative_rect, manager, container,
starting_height=1,
layer_thickness=1,
anchors=anchors,
visible=visible)
self._create_valid_ids(container=container,
parent_element=parent_element,
object_id=object_id,
element_id='label')
self.text = text
# initialise theme params
self.font = None
self.bg_colour = None
self.text_colour = None
self.disabled_text_colour = None
self.text_shadow_colour = None
self.text_shadow = False
self.text_shadow_size = 1
self.text_shadow_offset = (0, 0)
self.rebuild_from_changed_theme_data()
def set_text(self, text: str):
"""
Changes the string displayed by the label element. Labels do not support HTML styling.
:param text: the text to set the label to.
"""
if text != self.text:
self.text = text
self.rebuild()
def rebuild(self):
"""
Re-render the text to the label's underlying sprite image. This allows us to change what
the displayed text is or remake it with different theming (if the theming has changed).
"""
text_size = self.font.size(self.text)
if text_size[1] > self.relative_rect.height or text_size[0] > self.relative_rect.width:
width_overlap = self.relative_rect.width - text_size[0]
height_overlap = self.relative_rect.height - text_size[1]
warn_text = ('Label Rect is too small for text: '
'' + self.text + ' - size diff: ' + str((width_overlap, height_overlap)))
warnings.warn(warn_text, UserWarning)
new_image = pygame.surface.Surface(self.relative_rect.size,
flags=pygame.SRCALPHA,
depth=32)
if isinstance(self.bg_colour, ColourGradient):
new_image.fill(pygame.Color('#FFFFFFFF'))
self.bg_colour.apply_gradient_to_surface(new_image)
text_render = render_white_text_alpha_black_bg(self.font, self.text)
if self.is_enabled:
if isinstance(self.text_colour, ColourGradient):
self.text_colour.apply_gradient_to_surface(text_render)
else:
apply_colour_to_surface(self.text_colour, text_render)
else:
if isinstance(self.disabled_text_colour, ColourGradient):
self.disabled_text_colour.apply_gradient_to_surface(text_render)
else:
apply_colour_to_surface(self.disabled_text_colour, text_render)
else:
new_image.fill(self.bg_colour)
if self.is_enabled:
if isinstance(self.text_colour, ColourGradient):
text_render = render_white_text_alpha_black_bg(self.font, self.text)
self.text_colour.apply_gradient_to_surface(text_render)
else:
if self.bg_colour.a != 255 or self.text_shadow:
text_render = render_white_text_alpha_black_bg(self.font, self.text)
apply_colour_to_surface(self.text_colour, text_render)
else:
text_render = self.font.render(self.text, True,
self.text_colour, self.bg_colour)
text_render = text_render.convert_alpha()
else:
if isinstance(self.disabled_text_colour, ColourGradient):
text_render = render_white_text_alpha_black_bg(self.font, self.text)
self.disabled_text_colour.apply_gradient_to_surface(text_render)
else:
if self.bg_colour.a != 255 or self.text_shadow:
text_render = render_white_text_alpha_black_bg(self.font, self.text)
apply_colour_to_surface(self.disabled_text_colour, text_render)
else:
text_render = self.font.render(self.text, True,
self.disabled_text_colour, self.bg_colour)
text_render = text_render.convert_alpha()
text_render_rect = text_render.get_rect(centerx=int(self.rect.width / 2),
centery=int(self.rect.height / 2))
if self.text_shadow:
self._rebuild_shadow(new_image, text_render_rect)
basic_blit(new_image, text_render, text_render_rect)
self.set_image(new_image)
def _rebuild_shadow(self, new_image, text_render_rect):
shadow_text_render = render_white_text_alpha_black_bg(self.font, self.text)
apply_colour_to_surface(self.text_shadow_colour, shadow_text_render)
for y_pos in range(-self.text_shadow_size, self.text_shadow_size + 1):
shadow_text_rect = pygame.Rect((text_render_rect.x + self.text_shadow_offset[0],
text_render_rect.y + self.text_shadow_offset[1]
+ y_pos),
text_render_rect.size)
basic_blit(new_image, shadow_text_render, shadow_text_rect)
for x_pos in range(-self.text_shadow_size, self.text_shadow_size + 1):
shadow_text_rect = pygame.Rect((text_render_rect.x + self.text_shadow_offset[0]
+ x_pos,
text_render_rect.y + self.text_shadow_offset[1]),
text_render_rect.size)
basic_blit(new_image, shadow_text_render, shadow_text_rect)
for x_and_y in range(-self.text_shadow_size, self.text_shadow_size + 1):
shadow_text_rect = pygame.Rect(
(text_render_rect.x + self.text_shadow_offset[0] + x_and_y,
text_render_rect.y + self.text_shadow_offset[1] + x_and_y),
text_render_rect.size)
basic_blit(new_image, shadow_text_render, shadow_text_rect)
for x_and_y in range(-self.text_shadow_size, self.text_shadow_size + 1):
shadow_text_rect = pygame.Rect(
(text_render_rect.x + self.text_shadow_offset[0] - x_and_y,
text_render_rect.y + self.text_shadow_offset[1] + x_and_y),
text_render_rect.size)
basic_blit(new_image, shadow_text_render, shadow_text_rect)
def rebuild_from_changed_theme_data(self):
"""
Checks if any theming parameters have changed, and if so triggers a full rebuild of
the element.
"""
super().rebuild_from_changed_theme_data()
any_changed = False
font = self.ui_theme.get_font(self.combined_element_ids)
if font != self.font:
self.font = font
any_changed = True
text_colour = self.ui_theme.get_colour_or_gradient('normal_text', self.combined_element_ids)
if text_colour != self.text_colour:
self.text_colour = text_colour
any_changed = True
disabled_text_colour = self.ui_theme.get_colour_or_gradient('disabled_text',
self.combined_element_ids)
if disabled_text_colour != self.disabled_text_colour:
self.disabled_text_colour = disabled_text_colour
any_changed = True
bg_colour = self.ui_theme.get_colour_or_gradient('dark_bg', self.combined_element_ids)
if bg_colour != self.bg_colour:
self.bg_colour = bg_colour
any_changed = True
text_shadow_colour = self.ui_theme.get_colour('text_shadow', self.combined_element_ids)
if text_shadow_colour != self.text_shadow_colour:
self.text_shadow_colour = text_shadow_colour
any_changed = True
def parse_to_bool(str_data: str):
return bool(int(str_data))
if self._check_misc_theme_data_changed(attribute_name='text_shadow',
default_value=False,
casting_func=parse_to_bool):
any_changed = True
if self._check_misc_theme_data_changed(attribute_name='text_shadow_size',
default_value=1,
casting_func=int):
any_changed = True
def tuple_extract(str_data: str) -> Tuple[int, int]:
return int(str_data.split(',')[0]), int(str_data.split(',')[1])
if self._check_misc_theme_data_changed(attribute_name='text_shadow_offset',
default_value=(0, 0),
casting_func=tuple_extract):
any_changed = True
if any_changed:
self.rebuild()
def set_dimensions(self, dimensions: Union[pygame.math.Vector2,
Tuple[int, int],
Tuple[float, float]]):
"""
Method to directly set the dimensions of a label.
:param dimensions: The new dimensions to set.
"""
super().set_dimensions(dimensions)
if dimensions[0] >= 0 and dimensions[1] >= 0:
self.rebuild()
def disable(self):
"""
Disables the label so that its text changes to the disabled colour.
"""
if self.is_enabled:
self.is_enabled = False
self.rebuild()
def enable(self):
"""
Re-enables the label so that its text changes to the normal colour
"""
if not self.is_enabled:
self.is_enabled = True
self.rebuild()
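# --- Hedged example (not part of the original module) ------------------------
# The rebuild code above reads the 'normal_text', 'disabled_text', 'dark_bg' and
# 'text_shadow' colours plus the misc 'text_shadow', 'text_shadow_size' and
# 'text_shadow_offset' parameters from the theme. The dict below is a minimal
# sketch of a theme block exercising exactly those keys; pygame_gui themes are
# usually stored as JSON files with this layout, and the concrete colour values
# here are illustrative assumptions, not library defaults.
EXAMPLE_LABEL_THEME = {
    "label": {
        "colours": {
            "normal_text": "#FFFFFF",
            "disabled_text": "#6d736f",
            "dark_bg": "#21282D",
            "text_shadow": "#000000",
        },
        "misc": {
            "text_shadow": "1",
            "text_shadow_size": "1",
            "text_shadow_offset": "0,0",
        },
    }
}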
|
# -*- coding:utf8 -*-
import time
import math
import random
from lib.control.Control import Control
from lib.unit.Player import Player
from lib.struct.Point import Point
from lib.base.Base import Base
# Navigation base class
class NvBase(Base):
def __init__(self):
Base.__init__(self)
self.hander = open("tmp/logs/" + self.getFormatTime(False) + "_navbase.log", 'a+')
    # Get the point in the given set of coordinates that is closest to the current position
def getNearstPoint(self, now_pos: Point, all_pos):
start = time.time()
start_distance = float("inf")
print("start_distance:" + str(start_distance))
for pos in all_pos:
now_distance = self.get_distance_off_tow_pos(pos, now_pos)
print(pos.toString() + " now_distance:" + str(now_distance))
if now_distance < start_distance:
near_pos = pos
start_distance = now_distance
print("get_nearest_pos time:" + str(time.time() - start))
        return near_pos  # Don't worry, a point is always found here -- nothing is larger than infinity!
    # Compute the turn angle from three points
    # a is the edge from the start point to the current point
    # b is the edge from the start point to the target point
    # c is the edge from the current point to the target point
    # Let S be the start point, N the current point and T the target point
    # The three side lengths come from the distance formula; arccos then gives the radians/degrees
    # The correction angle is the angle between a and b plus the angle between b and c
    # (a standalone numeric sketch of this appears at the end of this module)
def posToDegree(self, startPos: Point, nowPos: Point, targetPos: Point):
a = self.get_distance_off_tow_pos(startPos, nowPos)
b = self.get_distance_off_tow_pos(startPos, targetPos)
c = self.get_distance_off_tow_pos(nowPos, targetPos)
print("a = " + str(a) + ", b = " + str(b) + ", c = " + str(c), file=self.hander)
        if a == 0:  # a distance of 0 probably means we are stuck on terrain
            return 9999
        if b == 0:
            print("The start point and the target point coincide")
            print("The start point and the target point coincide", file=self.hander)
            return 0
        if c == 0:
            print("The current point and the target point coincide")
            print("The current point and the target point coincide", file=self.hander)
            return 0
        # acos only accepts values in [-1, 1]; floating point rounding can push the ratio slightly
        # outside that range, so clamp it here
        tmp = (a ** 2 + b ** 2 - c ** 2) / (2 * a * b)
        tmp = 1 if tmp >= 1 else tmp
        tmp = -1 if tmp <= -1 else tmp
        degreeNST = math.degrees(math.acos(tmp))  # angle NST in degrees
        tmp = (b ** 2 + c ** 2 - a ** 2) / (2 * b * c)
        tmp = 1 if tmp >= 1 else tmp
        tmp = -1 if tmp <= -1 else tmp
        degreeNTS = math.degrees(math.acos(tmp))  # angle NTS in degrees
        degree = degreeNST + degreeNTS  # the correction angle is the supplement of angle SNT, i.e. the sum of the other two interior angles
        print("angle = " + str(degree), file=self.hander)
return degree
    # Compute the distance between two points (distance formula)
def get_distance_off_tow_pos(self, p1: Point, p2: Point):
return round(((p1.x - p2.x) ** 2 + (p1.y - p2.y) ** 2) ** 0.5, 8)
    # Decide whether to turn left or right, using the line equation through start and target
def leftOrRight(self, startPos: Point, nowPos: Point, targetPos: Point):
x1 = startPos.x
y1 = startPos.y
x2 = targetPos.x
y2 = targetPos.y
X = nowPos.x
Y = nowPos.y
        # Special case (almost never happens): start and target have the same x value, so the line
        # is vertical and has no slope
        if x1 == x2:
            if X < x1:  # the current point is to the left of the line
                if Y < y2:  # the target point is above the current point
                    return 'right'
                return 'left'
            else:  # the current point is to the right of the line
                if Y < y2:  # the target point is above the current point
                    return 'left'
                return 'right'
        # Build the line through start and target as y = kx + b. For the current point P(X, Y),
        # kX + b > Y means P is below the line, kX + b < Y means it is above, equal means on the line.
        k = (y2 - y1) / (x2 - x1)  # slope
        b = y1 - k * x1  # intercept
        print("line equation: y = " + str(k) + "x + " + str(b), file=self.hander)
        ret = k * X + b  # plug the current x into the line equation to see which side of the line we are on
        if ret >= Y:  # below the line
            print("the current point is below the line", file=self.hander)
if x2 < X:
return 'left'
return 'right'
        else:  # above the line
            print("the current point is above the line", file=self.hander)
if x2 > X:
return 'left'
return 'right'
    # @Deprecated
    # Uses the vector cross product to decide whether the current point lies to the left or right
    # of the line through the start and target points
    # Let S be the start point, N the current point and T the target point
    def leftOrRightByVector(self, startPos: Point, nowPos: Point, targetPos: Point):
        X = nowPos.x
        Y = nowPos.y
        ST = Point(targetPos.x - startPos.x, targetPos.y - startPos.y)  # vector from start to target
        SN = Point(nowPos.x - startPos.x, nowPos.y - startPos.y)  # vector from start to current point
        ds = (ST.x * SN.y) - (SN.x * ST.y)  # cross product
        if ds > 0:  # to the left of the line
if Y < targetPos.y:
return "left"
return "right"
        elif ds < 0:  # to the right of the line
if Y < targetPos.y:
return "right"
return "left"
        else:  # on the line
return "on the line"
    # Decide left/right turn from the player's facing
def leftOrRightByFacing(self, playerFacing, nowPos: Point, targetPos: Point):
slope = self.calNowToTargetFacing(nowPos, targetPos)
        directionDiff = slope - playerFacing  # difference between the target heading and the current facing (radians)
        print("heading difference: " + str(directionDiff), file=self.hander)
        if directionDiff > math.pi:
            directionDiff = ((math.pi * 2) - directionDiff) * -1  # more than 180 deg: take the other angle (360 deg - diff) and negate it
        if directionDiff < -math.pi:
            directionDiff = (math.pi * 2) - (directionDiff * -1)  # less than -180 deg: same idea, take the smaller angle and make it positive
        print("normalised heading difference: " + str(directionDiff), file=self.hander)
if directionDiff > 0:
return "left"
return "right"
    # Compute the heading (in radians) from the current point to the target point
def calNowToTargetFacing(self, nowPos: Point, targetPos: Point):
        slope = math.atan2(targetPos.y - nowPos.y, nowPos.x - targetPos.x)  # bearing as the angle to the x axis
        print("atan2 value: " + str(slope), file=self.hander)
        slope = slope + math.pi  # atan2 returns values in (-pi, pi]; shift them into the 0..2*pi range
        slope = slope - math.pi * 0.5  # rotate left by 90 deg so that up (north) is 0 instead of right, matching WoW's facing convention
        if slope < 0:
            slope = slope + math.pi * 2  # make sure the angle is not negative
        if slope > math.pi * 2:  # and also does not exceed 2*pi
            slope = slope - math.pi * 2
        print("adjusted heading: " + str(slope), file=self.hander)
return slope
    # Convert an angle into a turn duration; testing showed roughly 0.52 s for 90 degrees
    def degreeToTime(self, degree):
        one_degree_time = 0.51 / 90  # seconds needed to turn one degree
return one_degree_time * degree
    # Rotate the coordinate system clockwise by degree
def clockWiseSpin(self, point: Point, degree):
x = point.x * math.cos(math.radians(degree)) + point.y * math.sin(math.radians(degree))
y = point.y * math.cos(math.radians(degree)) - point.x * math.sin(math.radians(degree))
return Point(x, y)
    # Rotate the coordinate system counter-clockwise by degree
def counterClockWiseSpin(self, point: Point, degree):
x = point.x * math.cos(math.radians(degree)) - point.y * math.sin(math.radians(degree))
y = point.y * math.cos(math.radians(degree)) + point.x * math.sin(math.radians(degree))
return Point(x, y)
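# --- Hedged example (not part of the original module) ------------------------
# A standalone sketch of the turn-angle maths used by posToDegree above: given
# start S, current N and target T, the correction angle is the sum of the two
# triangle angles at S and T (i.e. the supplement of angle SNT). Plain (x, y)
# tuples are used instead of lib.struct.Point so the snippet runs on its own;
# the coordinates are made up for illustration.
def _demo_turn_angle(s, n, t):
    def dist(p, q):
        return math.hypot(p[0] - q[0], p[1] - q[1])
    a, b, c = dist(s, n), dist(s, t), dist(n, t)
    cos_nst = max(-1.0, min(1.0, (a * a + b * b - c * c) / (2 * a * b)))
    cos_nts = max(-1.0, min(1.0, (b * b + c * c - a * a) / (2 * b * c)))
    return math.degrees(math.acos(cos_nst)) + math.degrees(math.acos(cos_nts))

if __name__ == "__main__":
    # Walking along +x from (0, 0) to (10, 0) with the target at (10, 10):
    # the required correction is 90 degrees (turn from east to north).
    print(_demo_turn_angle((0, 0), (10, 0), (10, 10)))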
|
import os
import sys
import time
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from mrinet.unet_wavelet import UNetWavelet
from data.metrics import MetricEval
class DeGibbs(object):
"""A U-Net model for Gibbs artifact correction.
    This initializes a wavelet U-Net model intended for artifact correction in
medical images. The script allows attachment of PyTorch data loader
objects and associated transforms, in principle allowing the correction of
any artifacts simulated by the transforms.
By default, the model output parameters are saved in
'exp_dir/best_model.pt'
Args:
nlayers (int): Number of layers for U-Net.
in_ch (int): Number of input channels (usually 1 for magnitude, 2 for
complex).
out_ch (int): Number of output channels.
comp2mag (boolean): A flag for whether to output magnitude outputs when
given complex inputs. If True, then performs the magnitude operation
as a final step of the U-Net.
leaky (boolean): If true, use leaky ReLUs instead of normal ones.
device (torch.device): Use torch.device('cuda') for GPU or
torch.device('cpu') for CPU.
Examples:
Initialization:
>>> ob = DeGibbs(**params)
Loading parameters from file:
>>> ob = ob.load_model(model_file)
Attaching a dataloader:
>>> ob = ob.attach_data(exp_type, train_loader, val_loader)
Training:
>>> ob = ob.fit(num_epochs)
Run current model on example:
        >>> out = ob(x)
"""
def __init__(
self,
nlayers,
in_ch,
out_ch,
nchans,
comp2mag,
leaky,
device,
dtype=torch.double,
):
self.optimizer = None
self.train_data_dir = None
self.model = UNetWavelet(
ndims=2,
nlayers=nlayers,
in_ch=in_ch,
out_ch=out_ch,
top_filtnum=nchans,
resid=True,
wave_concat=True,
comp2mag=comp2mag,
leaky=leaky,
)
self.model = self.model.to(device=device, dtype=dtype)
self.device = device
self.dtype = dtype
def __call__(self, x):
return self.model(x)
def load_model(self, model_file, device=None, dtype=None):
"""Loads model parameters from .pt file.
Args:
model_file (str): A directory pointing to the .pt file with
parameters.
device (torch.device, default=None): Device to send model to after
loading. If None, uses device supplied by init function.
Returns:
self
"""
        if device is not None:
            self.device = device
            self.model = self.model.to(device)
        if dtype is not None:
            self.dtype = dtype
            self.model = self.model.to(dtype=dtype)
pytorch_total_params = sum(
p.numel() for p in self.model.parameters() if p.requires_grad
)
print("network params: {}".format(pytorch_total_params))
if os.path.isfile(model_file):
params = self.model.state_dict()
print("loading model from file: {}".format(model_file))
checkpt = torch.load(model_file, map_location=self.device)
state_dict = checkpt["state_dict"]
params.update(state_dict)
self.model.load_state_dict(params)
else:
print("model file {} not found".format(model_file))
self.model = self.model.eval()
return self
def init_optimizer(self, learning_rate, loss_fn, model_file=None):
"""Loads model parameters from .pt file.
Currently, only the PyTorch Adam optimizer is implemented.
Args:
learning_rate (double): The Adam learning rate.
loss_fn (torch.loss_fn): The PyTorch loss function
(e.g., torch.nn.MSELoss).
model_file (str): If not None, then loads the optimizer state from
the model file.
Returns:
self
"""
self.epoch = 0
self.train_loss_min = np.inf
self.val_loss_min = np.inf
self.loss_fn = loss_fn
optimizer = torch.optim.Adam(self.model.parameters(), lr=learning_rate)
        if model_file is not None and os.path.isfile(model_file):
print("loading optimizer from file: {}".format(model_file))
checkpt = torch.load(model_file)
optimizer.load_state_dict(checkpt["optimizer"])
self.epoch = checkpt["epoch"] + 1
train_loss_min = self.train_loss_min
try:
self.train_loss_min = checkpt["train_loss_min"]
            except KeyError:
self.train_loss_min = train_loss_min
val_loss_min = self.val_loss_min
try:
self.val_loss_min = checkpt["val_loss_min"]
            except KeyError:
self.val_loss_min = val_loss_min
        elif model_file is not None:
print("model file {} not found".format(model_file))
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUS!")
self.model = torch.nn.DataParallel(self.model)
self.optimizer = optimizer
self.learning_rate = learning_rate
return self
def attach_data(self, exp_type, train_loader, val_loader, train_data_dir=None):
"""Attaches a dataloader for training.
Args:
exp_type (str): Experiment type (used for visualizations).
train_loader (torch.DataLoader): The loader for the training split.
val_loader (torch.DataLoader): The loader for the validation split.
train_data_dir (str, default=None): Stored as attribute for print
statements.
Returns:
self
"""
self.train_loader = train_loader
self.val_loader = val_loader
self.exp_type = exp_type
self.train_data_dir = train_data_dir
return self
def _save_checkpoint(self, filename):
"""Save current model state.
Args:
filename (str): File name for .pt file to save model.
"""
try:
state = {
"state_dict": self.model.module.state_dict(),
"optimizer": self.optimizer.state_dict(),
}
except AttributeError:
state = {
"state_dict": self.model.state_dict(),
"optimizer": self.optimizer.state_dict(),
}
for attr, value in self.__dict__.items():
if (
(not "model" in attr)
and (not "optimizer" in attr)
and (not "loss_fn" in attr)
):
state[attr] = value
torch.save(state, filename)
def run_val_loop(self, visuals, rnd_seed, all_metrics=False):
"""Run a loop over the validation data split.
Args:
visuals (dict): A dictionary object storing all arrays for
visualization.
rnd_seed (int): Seed for numpy, PyTorch, and CUDA (for
reproducibility).
Returns:
val_loss (double): Validation loss.
visuals (dict): Dictionary visuals object with validation images.
"""
print("epoch {}: validation loop".format(self.epoch))
# set random seed
torch.manual_seed(rnd_seed)
np.random.seed(rnd_seed)
torch.cuda.manual_seed(rnd_seed)
device = self.device
self.model = self.model.eval()
disp_frac_vec = np.array(range(11))
disp_val = disp_frac_vec[0]
disp_counter = 0
if all_metrics:
metrics = {"mse": [], "ssim": [], "nmse": [], "psnr": []}
with torch.no_grad():
val_loss = 0
counter = 0
for i, val_batch in enumerate(self.val_loader):
val_target, val_dat = (
val_batch["target"].to(device),
val_batch["dat"].to(device),
)
val_est = self.model(val_dat)
val_loss = (
val_loss * counter + self.loss_fn(val_est, val_target).item()
) / (counter + 1)
counter = counter + 1
if all_metrics:
for ind in range(val_target.shape[0]):
nptarget = val_target[ind].cpu().numpy()
if nptarget.shape[0] > 1:
nptarget = np.sqrt(nptarget[0] ** 2 + nptarget[1] ** 2)
else:
nptarget = nptarget[0]
npest = val_est[ind].cpu().numpy()
if npest.shape[0] > 1:
npest = np.sqrt(npest[0] ** 2 + npest[1] ** 2)
else:
npest = npest[0]
metrics["mse"].append(MetricEval.mse(nptarget, npest))
metrics["ssim"].append(MetricEval.ssim(nptarget, npest))
metrics["nmse"].append(MetricEval.nmse(nptarget, npest))
metrics["psnr"].append(MetricEval.psnr(nptarget, npest))
if (i / len(self.val_loader)) >= (disp_val / 10):
print(
"validation loop progress: {:.0f}%".format(
100 * (i + 1) / len(self.val_loader)
)
)
disp_counter += 1
disp_val = disp_frac_vec[disp_counter]
print("validation loop finished")
dispind = 0
if val_target.shape[1] > 1: # assume dim 0, 1 are real, imag
tmp = np.squeeze(
val_target[dispind, ...].cpu().detach().float().numpy()
)
visuals["val_target"] = np.expand_dims(
np.sqrt(tmp[0, ...] ** 2 + tmp[1, ...] ** 2), 0
)
else:
visuals["val_target"] = np.absolute(
val_target[dispind, ...].cpu().detach().numpy()
)
if val_dat.shape[1] > 1: # assume dim 0, 1 are real, imag
tmp = np.squeeze(val_dat[dispind, ...].cpu().detach().float().numpy())
visuals["val_dat"] = np.expand_dims(
np.sqrt(tmp[0, ...] ** 2 + tmp[1, ...] ** 2), 0
)
else:
visuals["val_dat"] = np.absolute(
val_dat[dispind, ...].cpu().detach().numpy()
)
if val_est.shape[1] > 1: # assume dim 0, 1 are real, imag
tmp = np.squeeze(val_est[dispind, ...].cpu().detach().float().numpy())
visuals["val_est"] = np.expand_dims(
np.sqrt(tmp[0, ...] ** 2 + tmp[1, ...] ** 2), 0
)
else:
visuals["val_est"] = np.absolute(
val_est[dispind, ...].cpu().detach().float().numpy()
)
if all_metrics:
mse = np.array(metrics["mse"])
mse = mse[~np.isnan(mse)]
mse = mse[~np.isinf(mse)]
nmse = np.array(metrics["nmse"])
nmse = nmse[~np.isnan(nmse)]
nmse = nmse[~np.isinf(nmse)]
ssim = np.array(metrics["ssim"])
ssim = ssim[~np.isnan(ssim)]
ssim = ssim[~np.isinf(ssim)]
psnr = np.array(metrics["psnr"])
psnr = psnr[~np.isnan(psnr)]
psnr = psnr[~np.isinf(psnr)]
metrics["mse"] = mse
metrics["nmse"] = nmse
metrics["ssim"] = ssim
metrics["psnr"] = psnr
return metrics, visuals
else:
return val_loss, visuals
def run_train_loop(self, global_iter, visuals, rnd_seed):
"""Run a loop over the training data split.
The model is updated via the self.model attribute.
Args:
global_iter (int): Global iteration count (for print statements).
visuals (dict): A dictionary object storing all arrays for
visualization.
rnd_seed (int): Seed for numpy, PyTorch, and CUDA (for
reproducibility).
Returns:
global_iter (int): Updated global iteration.
visuals (dict): Dictionary visuals object with validation images.
disp_loss (double): A display loss for the training data taken as
an average from the last few images.
"""
print("epoch {}: training loop".format(self.epoch))
torch.manual_seed(rnd_seed)
np.random.seed(rnd_seed)
torch.cuda.manual_seed(rnd_seed)
device = self.device
self.model = self.model.train()
losses = []
disp_frac_vec = np.array(range(11))
disp_val = disp_frac_vec[0]
disp_counter = 0
for i, batch in enumerate(self.train_loader):
target, dat = batch["target"].to(device), batch["dat"].to(device)
est = self.model(dat)
loss = self.loss_fn(est, target)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
np_train_loss = loss.item()
losses.append(np_train_loss)
losses = losses[-50:]
disp_loss = np.mean(losses)
global_iter = global_iter + 1
if (i / len(self.train_loader)) >= (disp_val / 10):
print(
"training loop progress: {:.0f}%".format(
100 * (i + 1) / len(self.train_loader)
)
)
disp_counter += 1
disp_val = disp_frac_vec[disp_counter]
print("training loop finished")
dispind = 0
if target.shape[1] > 1: # assume dim 0, 1 are real, imag
tmp = np.squeeze(target[dispind, ...].cpu().detach().float().numpy())
visuals["train_target"] = np.expand_dims(
np.sqrt(tmp[0, ...] ** 2 + tmp[1, ...] ** 2), 0
)
else:
visuals["train_target"] = np.absolute(
target[dispind, ...].cpu().detach().numpy()
)
if dat.shape[1] > 1: # assume dim 0, 1 are real, imag
tmp = np.squeeze(dat[dispind, ...].cpu().detach().float().numpy())
visuals["train_dat"] = np.expand_dims(
np.sqrt(tmp[0, ...] ** 2 + tmp[1, ...] ** 2), 0
)
else:
visuals["train_dat"] = np.absolute(dat[dispind, ...].cpu().detach().numpy())
if est.shape[1] > 1: # assume dim 0, 1 are real, imag
tmp = np.squeeze(est[dispind, ...].cpu().detach().float().numpy())
visuals["train_est"] = np.expand_dims(
np.sqrt(tmp[0, ...] ** 2 + tmp[1, ...] ** 2), 0
)
else:
visuals["train_est"] = np.absolute(
est[dispind, ...].cpu().detach().float().numpy()
)
return global_iter, visuals, disp_loss
def fit(self, num_epochs, exp_dir=None, run_eval=True, seed_offset=476):
"""Fit attached data loaders (i.e., train the model).
The model is updated via the self.model attribute.
Args:
num_epochs (int): Number of epochs to train.
exp_dir (str, default=None): String pointing to directory for
logging tensorboardX outputs.
run_eval (boolean, default=True): Whether to run validation loop.
seed_offset (int, default=476): Offset for random number seed (for
reproducibility).
Returns:
self
"""
print("starting training")
        if exp_dir is not None:
print("saving logs to {}".format(exp_dir))
writer = SummaryWriter(log_dir=exp_dir)
visuals = {}
print("initializing loss tracking")
epochs = []
train_losses = []
val_losses = []
global_iter = 0
for epoch in range(self.epoch, num_epochs):
rnd_seed = np.random.get_state()[1][0] + self.epoch + seed_offset
self.epoch = epoch
epoch_start = time.time()
global_iter, visuals, train_loss = self.run_train_loop(
global_iter, visuals, rnd_seed
)
if run_eval:
val_loss, visuals = self.run_val_loop(visuals, seed_offset)
                if exp_dir is not None:
writer.add_scalar(
"losses/eval_loss", scalar_value=val_loss, global_step=epoch
)
            if exp_dir is not None:
writer.add_scalar(
"losses/train_loss", scalar_value=train_loss, global_step=epoch
)
for label, image in visuals.items():
if "val" in label:
image = image / np.max(image)
writer.add_image(
"validation/" + label, image, global_step=epoch
)
elif "train" in label:
image = image / np.max(image)
writer.add_image("training/" + label, image, global_step=epoch)
if run_eval:
val_losses.append(val_loss)
if val_loss < self.val_loss_min:
self.val_loss_min = val_loss
checkname = os.path.join(exp_dir, "best_model.pt")
else:
checkname = os.path.join(exp_dir, "model_epoch_{}.pt".format(epoch))
else:
if train_loss < self.train_loss_min:
checkname = os.path.join(exp_dir, "best_model.pt")
else:
checkname = os.path.join(exp_dir, "model_epoch_{}.pt".format(epoch))
if train_loss < self.train_loss_min:
self.train_loss_min = train_loss
train_losses.append(train_loss)
epochs.append(epoch)
self._save_checkpoint(checkname)
epoch_end = time.time()
print("epoch finished, time: {}".format(epoch_end - epoch_start))
print("validation loss: {}, training loss: {}".format(val_loss, train_loss))
return self
    def __repr__(self):
        num_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)
        out = "\n" + self.__class__.__name__ + "\n"
        out += "model: {}\n".format(self.model.__class__.__name__)
        out += "number of trainable model parameters: {}\n".format(num_params)
        out += "optimizer: {}\n".format(self.optimizer.__class__.__name__)
        out += "train_data_dir: {}".format(self.train_data_dir)
        return out
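# --- Hedged example (not part of the original module) ------------------------
# A minimal sketch of the intended workflow: construct the model, initialise the
# optimizer, attach data loaders and train. The hyper-parameters and the "exp/"
# paths below are illustrative assumptions, and the loader lines are left
# commented because real torch DataLoaders yielding dicts with "target" and
# "dat" tensors are required (see run_train_loop / run_val_loop).
if __name__ == "__main__":
    degibbs = DeGibbs(
        nlayers=4,
        in_ch=2,
        out_ch=2,
        nchans=32,
        comp2mag=False,
        leaky=True,
        device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
    )
    degibbs = degibbs.init_optimizer(
        learning_rate=1e-4,
        loss_fn=torch.nn.MSELoss(),
        model_file="exp/best_model.pt",  # restored only if the file exists
    )
    # degibbs = degibbs.attach_data("degibbs", train_loader, val_loader)
    # degibbs = degibbs.fit(num_epochs=10, exp_dir="exp")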
|
# -*- coding: utf-8 -*-
"""
This code is auto generated from troposphere_mate.code_generator.__init__.py scripts.
"""
import sys
if sys.version_info.major >= 3 and sys.version_info.minor >= 5: # pragma: no cover
from typing import Union, List, Any
import troposphere.transfer
from troposphere.transfer import (
EndpointDetails as _EndpointDetails,
HomeDirectoryMapEntry as _HomeDirectoryMapEntry,
IdentityProviderDetails as _IdentityProviderDetails,
Tags as _Tags,
)
from troposphere import Template, AWSHelperFn
from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin
from troposphere_mate.core.sentiel import REQUIRED, NOTHING
class EndpointDetails(troposphere.transfer.EndpointDetails, Mixin):
def __init__(self,
title=None,
AddressAllocationIds=NOTHING, # type: List[Union[str, AWSHelperFn]]
SecurityGroupIds=NOTHING, # type: List[Union[str, AWSHelperFn]]
SubnetIds=NOTHING, # type: List[Union[str, AWSHelperFn]]
VpcEndpointId=NOTHING, # type: Union[str, AWSHelperFn]
VpcId=NOTHING, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
AddressAllocationIds=AddressAllocationIds,
SecurityGroupIds=SecurityGroupIds,
SubnetIds=SubnetIds,
VpcEndpointId=VpcEndpointId,
VpcId=VpcId,
**kwargs
)
super(EndpointDetails, self).__init__(**processed_kwargs)
class IdentityProviderDetails(troposphere.transfer.IdentityProviderDetails, Mixin):
def __init__(self,
title=None,
InvocationRole=REQUIRED, # type: Union[str, AWSHelperFn]
Url=REQUIRED, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
InvocationRole=InvocationRole,
Url=Url,
**kwargs
)
super(IdentityProviderDetails, self).__init__(**processed_kwargs)
class Server(troposphere.transfer.Server, Mixin):
def __init__(self,
title, # type: str
template=None, # type: Template
validation=True, # type: bool
Certificate=NOTHING, # type: Union[str, AWSHelperFn]
EndpointDetails=NOTHING, # type: _EndpointDetails
EndpointType=NOTHING, # type: Union[str, AWSHelperFn]
IdentityProviderDetails=NOTHING, # type: _IdentityProviderDetails
IdentityProviderType=NOTHING, # type: Union[str, AWSHelperFn]
LoggingRole=NOTHING, # type: Union[str, AWSHelperFn]
Protocols=NOTHING, # type: List[Union[str, AWSHelperFn]]
SecurityPolicyName=NOTHING, # type: Union[str, AWSHelperFn]
Tags=NOTHING, # type: _Tags
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
template=template,
validation=validation,
Certificate=Certificate,
EndpointDetails=EndpointDetails,
EndpointType=EndpointType,
IdentityProviderDetails=IdentityProviderDetails,
IdentityProviderType=IdentityProviderType,
LoggingRole=LoggingRole,
Protocols=Protocols,
SecurityPolicyName=SecurityPolicyName,
Tags=Tags,
**kwargs
)
super(Server, self).__init__(**processed_kwargs)
class HomeDirectoryMapEntry(troposphere.transfer.HomeDirectoryMapEntry, Mixin):
def __init__(self,
title=None,
Entry=REQUIRED, # type: Union[str, AWSHelperFn]
Target=REQUIRED, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
Entry=Entry,
Target=Target,
**kwargs
)
super(HomeDirectoryMapEntry, self).__init__(**processed_kwargs)
class User(troposphere.transfer.User, Mixin):
def __init__(self,
title, # type: str
template=None, # type: Template
validation=True, # type: bool
Role=REQUIRED, # type: Union[str, AWSHelperFn]
ServerId=REQUIRED, # type: Union[str, AWSHelperFn]
UserName=REQUIRED, # type: Union[str, AWSHelperFn]
HomeDirectory=NOTHING, # type: Union[str, AWSHelperFn]
HomeDirectoryMappings=NOTHING, # type: List[_HomeDirectoryMapEntry]
HomeDirectoryType=NOTHING, # type: Any
Policy=NOTHING, # type: Union[str, AWSHelperFn]
SshPublicKeys=NOTHING, # type: List[Union[str, AWSHelperFn]]
Tags=NOTHING, # type: _Tags
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
template=template,
validation=validation,
Role=Role,
ServerId=ServerId,
UserName=UserName,
HomeDirectory=HomeDirectory,
HomeDirectoryMappings=HomeDirectoryMappings,
HomeDirectoryType=HomeDirectoryType,
Policy=Policy,
SshPublicKeys=SshPublicKeys,
Tags=Tags,
**kwargs
)
super(User, self).__init__(**processed_kwargs)
|
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.boxlayout import BoxLayout
from kivy.core.window import Window
from string import punctuation, ascii_uppercase, ascii_lowercase, digits
from random import sample, shuffle, randint
import pyperclip
Builder.load_file("Password.kv")
class MyLayout(BoxLayout):
def checkbox_click(self, parameters):
numbers = digits
symbols = punctuation
capital = ascii_uppercase
small = ascii_lowercase
initial_password = ""
if parameters == "easy":
initial_password = sample(small, randint(2, 5)) + sample(capital, randint(3, 5))
elif parameters == "normal":
initial_password = sample(small, randint(2, 3)) + sample(capital, randint(2, 4)) + \
sample(numbers, randint(3, 5))
elif parameters == "hard":
initial_password = sample(small, randint(2, 3)) + sample(capital, randint(2, 3)) + \
sample(numbers, randint(3, 4)) + sample(symbols, randint(3, 5))
shuffle(initial_password)
global final_password
final_password = "".join(initial_password)
self.ids.password.text = f"Your Password: {final_password}"
def copy(self):
try:
pyperclip.copy(final_password)
except Exception as e:
print(f"Error: {e}")
class PGApp(App):
def build(self):
Window.clearcolor = .5, .5, .5, 1
Window.size = (350, 500)
return MyLayout()
if __name__ == '__main__':
PGApp().run()
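# --- Hedged example (not part of the original module) ------------------------
# checkbox_click above builds passwords from random samples of the character
# classes and shuffles them. The standalone helper below repeats that idea
# outside of Kivy so it can be tried at a REPL; the length ranges mirror the
# "hard" setting and are otherwise arbitrary.
def _demo_hard_password():
    chars = (sample(ascii_lowercase, randint(2, 3))
             + sample(ascii_uppercase, randint(2, 3))
             + sample(digits, randint(3, 4))
             + sample(punctuation, randint(3, 5)))
    shuffle(chars)
    return "".join(chars)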
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import numpy as np
import tensorflow as tf
from scipy.stats import truncnorm
from GraphicsDL.modules_v2.reader import BaseReaderV2, DefaultTFReader, RandomReader
class RoomSizeReader(BaseReaderV2):
def __init__(self, data_dir, batch_size, num_samples, num_devices, shuffle, split, infinite, in_params, out_params,
w_params, prefix, rel_path, name=None, **kwargs):
super().__init__(batch_size, num_devices, shuffle, split, infinite, in_params, out_params, w_params, name,
**kwargs)
self.record_dir = os.path.join(data_dir, rel_path) if rel_path else data_dir
prefix = prefix if prefix else 'room_size'
self.room_size_files = os.path.join(self.record_dir, f'{prefix}_{num_samples}.npz')
self.deterministic = None
self.cur_samples = 0
self.num_samples = num_samples
@staticmethod
def random_room_size(sample_num):
room_rand_x = tf.random.normal([sample_num, 1], mean=4.53, stddev=0.98, dtype=tf.float32)
room_rand_x = tf.clip_by_value(room_rand_x, 1.7, 6.4) / 2
room_rand_z = tf.random.normal([sample_num, 1], mean=4.35, stddev=0.99, dtype=tf.float32)
room_rand_z = tf.clip_by_value(room_rand_z, 1.5, 6.4) / 2
room_rand_y = tf.random.normal([sample_num, 1], mean=2.74, stddev=0.05, dtype=tf.float32)
room_rand_y = tf.clip_by_value(room_rand_y, 2.2, 3.2)
box_max_x = tf.convert_to_tensor([3.2], dtype=tf.float32)[tf.newaxis, ...] + room_rand_x
box_min_x = tf.convert_to_tensor([3.2], dtype=tf.float32)[tf.newaxis, ...] - room_rand_x
box_max_z = tf.convert_to_tensor([3.2], dtype=tf.float32)[tf.newaxis, ...] + room_rand_z
box_min_z = tf.convert_to_tensor([3.2], dtype=tf.float32)[tf.newaxis, ...] - room_rand_z
box_max_y = tf.clip_by_value(room_rand_y, 2.2, 3.2)
box_min_y = tf.zeros_like(box_max_y)
room_rand = tf.concat([box_max_x, box_max_y, box_max_z, box_min_x, box_min_y, box_min_z], axis=-1)
return room_rand
def postprocess(self, inputs, post_str):
post_str_split = np.split(np.reshape(np.asarray(post_str.split('-')), [-1, 2]), 2, axis=-1)
p_method, p_args = [a_[..., 0] for a_ in post_str_split]
for p_m, p_a in zip(p_method, p_args):
if p_m == 'scale':
inputs = inputs * float(p_a)
else:
raise NotImplementedError
return inputs
def next_stochastic(self):
in_elem = list()
for i_p in self.in_params:
room_rand = self.random_room_size(self.batch_size)
assert i_p.postprocess
if i_p.postprocess:
room_rand = self.postprocess(room_rand, i_p.postprocess)
all_data = tf.split(room_rand, self.num_devices)
in_elem.append(all_data)
return dict(inputs=in_elem, outputs=list(), weights=list(), alias=list())
def next_deterministic(self):
if self.deterministic is None:
if not os.path.exists(self.room_size_files):
deterministic_data = dict()
self.deterministic = list()
for i_p in self.in_params:
rand_nd = self.random_room_size(self.num_samples)
assert i_p.postprocess
if i_p.postprocess:
rand_nd = self.postprocess(rand_nd, i_p.postprocess)
self.deterministic.append(rand_nd)
deterministic_data[i_p.name] = rand_nd
np.savez_compressed(self.room_size_files, **deterministic_data)
else:
random_reader_meta = np.load(self.room_size_files)
self.deterministic = list()
for i_p in self.in_params:
rand_nd = random_reader_meta[i_p.name].astype(np.float32)
assert rand_nd.shape == (self.num_samples, 6)
self.deterministic.append(rand_nd)
try:
in_elem = list()
if self.cur_samples > self.num_samples - self.batch_size:
raise StopIteration
for d in self.deterministic:
all_data = tf.split(d[self.cur_samples: self.cur_samples + self.batch_size], self.num_devices, axis=0)
in_elem.append(all_data)
self.cur_samples += self.batch_size
return dict(inputs=in_elem, outputs=list(), weights=list(), alias=list())
except StopIteration:
self.cur_samples = 0
raise StopIteration
def next(self):
try:
if self.shuffle:
return self.next_stochastic()
else:
return self.next_deterministic()
except StopIteration:
if self.infinite:
return self.next()
else:
raise StopIteration
class RandomReaderV1(RandomReader):
def __init__(self, data_dir, batch_size, num_samples, num_devices, shuffle, split, infinite, in_params, out_params,
w_params, prefix, rel_path, compress='', name=None, **kwargs):
super().__init__(batch_size, num_samples, num_devices, shuffle, split, infinite, in_params, out_params,
w_params, name, **kwargs)
self.record_dir = os.path.join(data_dir, rel_path) if rel_path else data_dir
prefix = prefix if prefix else 'custom_random'
self.random_reader_files = os.path.join(self.record_dir, f'{prefix}_{num_samples}.npz')
def next_deterministic(self):
if self.deterministic is None:
if not os.path.exists(self.random_reader_files):
deterministic_data = dict()
self.deterministic = list()
for i_p in self.in_params:
rand_nd = truncnorm.rvs(-1, 1, size=[self.num_samples, *i_p.raw_shape]).astype(np.float32)
# rand_nd = np.random.normal(size=[self.num_samples, *i_p.raw_shape]).astype(np.float32)
self.deterministic.append(rand_nd)
deterministic_data[i_p.name] = rand_nd
np.savez_compressed(self.random_reader_files, **deterministic_data)
else:
random_reader_meta = np.load(self.random_reader_files)
self.deterministic = list()
for i_p in self.in_params:
rand_nd = random_reader_meta[i_p.name].astype(np.float32)
assert rand_nd.shape == (self.num_samples, *i_p.raw_shape)
self.deterministic.append(rand_nd)
return super().next_deterministic()
class Str3DRoomSizeReader(RoomSizeReader):
def __init__(self, data_dir, batch_size, num_samples, num_devices, shuffle, split, infinite, in_params, out_params,
w_params, prefix, rel_path, name=None, **kwargs):
super().__init__(data_dir, batch_size, num_samples, num_devices, shuffle, split, infinite, in_params,
out_params, w_params, prefix, rel_path, name, **kwargs)
@staticmethod
def random_room_size(sample_num):
room_rand_x = tf.random.normal([sample_num, 1], mean=3.98, stddev=1.14, dtype=tf.float32)
room_rand_x = tf.clip_by_value(room_rand_x, 2.2, 6.4) / 2
room_rand_z = tf.random.normal([sample_num, 1], mean=3.98, stddev=1.14, dtype=tf.float32)
room_rand_z = tf.clip_by_value(room_rand_z, 2.2, 6.4) / 2
room_rand_y = tf.random.normal([sample_num, 1], mean=2.74, stddev=0.05, dtype=tf.float32)
room_rand_y = tf.clip_by_value(room_rand_y, 2.2, 3.2)
box_max_x = tf.convert_to_tensor([3.2], dtype=tf.float32)[tf.newaxis, ...] + room_rand_x
box_min_x = tf.convert_to_tensor([3.2], dtype=tf.float32)[tf.newaxis, ...] - room_rand_x
box_max_z = tf.convert_to_tensor([3.2], dtype=tf.float32)[tf.newaxis, ...] + room_rand_z
box_min_z = tf.convert_to_tensor([3.2], dtype=tf.float32)[tf.newaxis, ...] - room_rand_z
box_max_y = tf.clip_by_value(room_rand_y, 2.2, 3.2)
box_min_y = tf.zeros_like(box_max_y)
room_rand = tf.concat([box_max_x, box_max_y, box_max_z, box_min_x, box_min_y, box_min_z], axis=-1)
return room_rand
class Str3DLivingRoomSizeReader(RoomSizeReader):
def __init__(self, data_dir, batch_size, num_samples, num_devices, shuffle, split, infinite, in_params, out_params,
w_params, prefix, rel_path, name=None, **kwargs):
super().__init__(data_dir, batch_size, num_samples, num_devices, shuffle, split, infinite, in_params,
out_params, w_params, prefix, rel_path, name, **kwargs)
@staticmethod
def random_room_size(sample_num):
room_rand_x = tf.random.normal([sample_num, 1], mean=8.44, stddev=1.70, dtype=tf.float32)
room_rand_x = tf.clip_by_value(room_rand_x, 4.0, 9.6) / 2
room_rand_z = tf.random.normal([sample_num, 1], mean=8.44, stddev=1.70, dtype=tf.float32)
room_rand_z = tf.clip_by_value(room_rand_z, 4.0, 9.6) / 2
room_rand_y = tf.random.normal([sample_num, 1], mean=2.80, stddev=0.06, dtype=tf.float32)
room_rand_y = tf.clip_by_value(room_rand_y, 2.6, 3.0)
box_max_x = tf.convert_to_tensor([4.8], dtype=tf.float32)[tf.newaxis, ...] + room_rand_x
box_min_x = tf.convert_to_tensor([4.8], dtype=tf.float32)[tf.newaxis, ...] - room_rand_x
box_max_z = tf.convert_to_tensor([4.8], dtype=tf.float32)[tf.newaxis, ...] + room_rand_z
box_min_z = tf.convert_to_tensor([4.8], dtype=tf.float32)[tf.newaxis, ...] - room_rand_z
box_max_y = tf.clip_by_value(room_rand_y, 2.6, 3.0)
box_min_y = tf.zeros_like(box_max_y)
room_rand = tf.concat([box_max_x, box_max_y, box_max_z, box_min_x, box_min_y, box_min_z], axis=-1)
return room_rand
class Str3DKitchenRoomSizeReader(RoomSizeReader):
def __init__(self, data_dir, batch_size, num_samples, num_devices, shuffle, split, infinite, in_params, out_params,
w_params, prefix, rel_path, name=None, **kwargs):
super().__init__(data_dir, batch_size, num_samples, num_devices, shuffle, split, infinite, in_params,
out_params, w_params, prefix, rel_path, name, **kwargs)
@staticmethod
def random_room_size(sample_num):
room_rand_x = tf.random.normal([sample_num, 1], mean=3.32, stddev=0.74, dtype=tf.float32)
room_rand_x = tf.clip_by_value(room_rand_x, 2.0, 6.4) / 2
room_rand_z = tf.random.normal([sample_num, 1], mean=3.32, stddev=0.74, dtype=tf.float32)
room_rand_z = tf.clip_by_value(room_rand_z, 2.0, 6.4) / 2
room_rand_y = tf.random.normal([sample_num, 1], mean=2.80, stddev=0.06, dtype=tf.float32)
room_rand_y = tf.clip_by_value(room_rand_y, 2.5, 3.2)
box_max_x = tf.convert_to_tensor([3.2], dtype=tf.float32)[tf.newaxis, ...] + room_rand_x
box_min_x = tf.convert_to_tensor([3.2], dtype=tf.float32)[tf.newaxis, ...] - room_rand_x
box_max_z = tf.convert_to_tensor([3.2], dtype=tf.float32)[tf.newaxis, ...] + room_rand_z
box_min_z = tf.convert_to_tensor([3.2], dtype=tf.float32)[tf.newaxis, ...] - room_rand_z
box_max_y = tf.clip_by_value(room_rand_y, 2.5, 3.2)
box_min_y = tf.zeros_like(box_max_y)
room_rand = tf.concat([box_max_x, box_max_y, box_max_z, box_min_x, box_min_y, box_min_z], axis=-1)
return room_rand
class Mat3DBedroomRoomSizeReader(RoomSizeReader):
def __init__(self, data_dir, batch_size, num_samples, num_devices, shuffle, split, infinite, in_params, out_params,
w_params, prefix, rel_path, name=None, **kwargs):
super().__init__(data_dir, batch_size, num_samples, num_devices, shuffle, split, infinite, in_params,
out_params, w_params, prefix, rel_path, name, **kwargs)
@staticmethod
def random_room_size(sample_num):
room_rand_x = tf.random.normal([sample_num, 1], mean=4.164, stddev=0.973, dtype=tf.float32)
room_rand_x = tf.clip_by_value(room_rand_x, 2.2, 6.4) / 2
room_rand_z = tf.random.normal([sample_num, 1], mean=4.265, stddev=0.955, dtype=tf.float32)
room_rand_z = tf.clip_by_value(room_rand_z, 2.2, 6.4) / 2
room_rand_y = tf.random.normal([sample_num, 1], mean=2.387, stddev=0.425, dtype=tf.float32)
room_rand_y = tf.clip_by_value(room_rand_y, 2.2, 3.2)
box_max_x = tf.convert_to_tensor([3.2], dtype=tf.float32)[tf.newaxis, ...] + room_rand_x
box_min_x = tf.convert_to_tensor([3.2], dtype=tf.float32)[tf.newaxis, ...] - room_rand_x
box_max_z = tf.convert_to_tensor([3.2], dtype=tf.float32)[tf.newaxis, ...] + room_rand_z
box_min_z = tf.convert_to_tensor([3.2], dtype=tf.float32)[tf.newaxis, ...] - room_rand_z
box_max_y = tf.clip_by_value(room_rand_y, 1.7, 3.2)
box_min_y = tf.zeros_like(box_max_y)
room_rand = tf.concat([box_max_x, box_max_y, box_max_z, box_min_x, box_min_y, box_min_z], axis=-1)
return room_rand
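# --- Hedged example (not part of the original module) ------------------------
# Each random_room_size variant draws clipped normal extents around a fixed
# centre and packs them as [max_x, max_y, max_z, min_x, min_y, min_z]. The
# snippet below just materialises a few boxes from the base reader's sampler;
# it assumes TF2 eager execution (the default) and is for inspection only.
if __name__ == "__main__":
    boxes = RoomSizeReader.random_room_size(4)
    print(boxes.shape)    # (4, 6)
    print(boxes.numpy())  # rows: [max_x, max_y, max_z, min_x, min_y, min_z]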
|
from src.buttons.call_button import CallButton
from src.buttons.close_doors_button import CloseDoorsButton
from src.buttons.combo_button import ComboButton
from src.buttons.down_button import DownButton
from src.buttons.floor_number_button import FloorNumberButton
from src.buttons.open_doors_button import OpenDoorsButton
from src.buttons.up_button import UpButton
|
#!/usr/bin/env python3
import argparse
import sys
ID_TAG = "0_id"
SEL_TAG = "example (encoder) selection"
def main(args):
results = []
scores = []
headcount = []
epoch = {}
with open(args.logfile, "r") as fh:
for line in fh:
line = [x.strip() for x in line.split("|")]
if len(line) < 3:
continue
if line[2] == "fairseq.tasks.translation_modular":
if "example " not in line[3]:
continue
item = [x.strip() for x in line[3].split(":")]
if item[0] not in epoch:
epoch[item[0]] = [item[1]]
else:
epoch[item[0]].append(item[1])
if item[0] == "example hypothesis":
if ID_TAG not in epoch:
epoch[ID_TAG] = ["0"]
else:
                        epoch[ID_TAG].append(str(len(epoch[ID_TAG])))
if line[2] == "valid":
scores.append(line[12].split(" ")[1])
headcount.append(line[17].split(" ")[1:])
results.append(epoch)
epoch = {}
if args.print_head_counts:
for r in results:
heads = {}
for sel in r[SEL_TAG]:
for h in sel.split(","):
if h not in heads:
heads[h] = 1
else:
heads[h] += 1
print(" ".join(["{}-{}".format(k, v) for k, v in heads.items()]))
sys.exit()
if args.print_subset_counts:
for r in results:
subsets = {}
for sel in r[SEL_TAG]:
if sel not in subsets:
subsets[sel] = 1
else:
subsets[sel] += 1
print(" ".join(["{}-{}".format(k, v) for k, v in subsets.items()]))
sys.exit()
for r, s, h in zip(results, scores, headcount):
keys = sorted(list(r.keys()))
for i in range(len(r[keys[0]])):
print("\t".join([r[k][i] for k in keys]))
print("VALID\t{}\t{}".format(s, h))
def parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--logfile", type=str, required=True)
parser.add_argument("--print-head-counts", action="store_true")
parser.add_argument("--print-subset-counts", action="store_true")
return parser.parse_args()
if __name__ == "__main__":
main(parse_args())
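# --- Hedged usage note (not part of the original script) ---------------------
# Typical invocations, assuming a fairseq training log written by
# fairseq.tasks.translation_modular (the script/file names are illustrative):
#   python parse_modular_log.py --logfile train.log
#   python parse_modular_log.py --logfile train.log --print-head-counts
#   python parse_modular_log.py --logfile train.log --print-subset-counts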
|
import json
import requests
import pandas as pd
from iex.utils import (parse_date,
validate_date_format,
validate_range_set,
validate_output_format,
timestamp_to_datetime,
timestamp_to_isoformat)
from iex.constants import (CHART_RANGES,
RANGES,
DATE_FIELDS,
BASE_URL,
BASE_SIO_URL,
BASE_SIO_VERSION)
from socketIO_client_nexus import (SocketIO,
SocketIONamespace)
class feed_handler(SocketIONamespace):
def on_connect(self):
pass
def on_disconnect(self):
pass
def on_message(self, msg):
data = json.loads(msg)
class IexMarket:
def __init__(self, symbols = None, socket_handler = None, date_format='timestamp', output_format='dataframe'):
"""
Args:
socket_handler - Function for handling socket feed.
date_format - Converts dates
output_format - dataframe (pandas) or json
"""
self.symbols = symbols
self.socket_handler = socket_handler
self.date_format = validate_date_format(date_format)
self.output_format = validate_output_format(output_format)
def _socket(self):
socket = SocketIO('https://ws-api.iextrading.com', 443)
namespace = socket.define(feed_handler, "/1.0/tops")
symbols = "snap"
namespace.emit('subscribe', 'firehose')
socket.wait()
def _get(self, url, params={}):
request_url =f"{BASE_URL}"
response = requests.get(f"{request_url}/{url}", params=params)
if response.status_code != 200:
raise Exception(f"{response.status_code}: {response.content.decode('utf-8')}")
result = response.json()
# timestamp conversion
if type(result) == dict and self.date_format:
if self.date_format == 'datetime':
date_apply_func = timestamp_to_datetime
elif self.date_format == 'isoformat':
date_apply_func = timestamp_to_isoformat
for key, val in result.items():
if key in DATE_FIELDS:
result[key] = date_apply_func(val)
        if self.output_format == 'dataframe':
if url == 'previous':
# Reorient previous result.
result = pd.DataFrame.from_dict(result).transpose().reset_index()
cols = ['symbol'] + [x for x in result.columns if x != 'symbol' and x != 'index']
result = result.reindex(cols, axis=1)
return result
return pd.DataFrame.from_dict(result)
    def tops(self):
        params = {'symbols': ','.join(self.symbols)} if self.symbols else {}
        return self._get("tops", params)
def __repr__(self):
return f"<iex_market>"
|
import random
import string
from pymongo import MongoClient
import certifi
import validators
# Mongo DB connection, ask me for the password! :)
# TODO: Store password in credstash
user = "unitedmasters"
password = ""
cluster = MongoClient(f"mongodb+srv://{user}:{password}@shortdata.yyg2l.mongodb.net/test?retryWrites=true&w=majority", tlsCAFile=certifi.where())
db = cluster["shortened"]
collection = db["test-data"]
# Gets the original URL based on the shortened url
def get_url(url):
# We always know its going to be sho.rt, so hard coding it for now
url_id = url.replace("sho.rt/", "")
# Check Mongo for that id
result = collection.find_one({"_id": url_id})
# If result is not in the table it will return a 404
if result:
return {"og_url": result["url"]}
else:
return "Couldn't find that URL sorry", 404
# Generates a random 8 character string id, not the best way to do it long term
def id_generator():
return "".join(random.choices(string.ascii_letters, k=8))
# Shortens the given url by creating a unique id that will get stored to mongoDB
# returns the new shortened url
def url_shortener(url):
if not url:
return "URL cannot be empty", 400
    valid = validators.url(url)
if not valid:
return "Invalid URL, did you forget the https:// ? ", 400
new_id = id_generator()
    # A way to avoid id collisions
while collection.find_one({"_id": new_id}):
new_id = id_generator()
# Insert new pair to mongo
pair = {"_id": new_id, "url": url}
collection.insert_one(pair)
return {"short_url": f"sho.rt/{new_id}"}
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import, unicode_literals
import sys
import socket
from contextlib import closing
from unisim import BaseClient
class Client(BaseClient):
def interpreter(self):
pass
def event_handler(self, info):
if info["edges"][-1] in ("p1en", "p2en", "p3en"):
if info["road"] in ("res1", "res2", "res3"):
self._cmd = "get_parking"
return True
if __name__ == '__main__':
    Client("", 4001).run('get_parking')
|
"""This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import sys
import thinkstats2
import thinkplot
def ReadFile(filename):
"""Reads a list of numbers from a file.
filename: string
returns: list of float
"""
fp = open(filename)
data = []
for line in fp:
x = float(line.strip())
data.append(x)
return data
def main(script, filename='mystery0.dat'):
data = ReadFile(filename)
cdf = thinkstats2.Cdf(data)
thinkplot.PrePlot(num=6, rows=2, cols=3)
thinkplot.SubPlot(1)
thinkplot.Cdf(cdf, color='C0', label=filename)
thinkplot.Config(title='CDF on linear scale', ylabel='CDF')
thinkplot.SubPlot(2)
scale = thinkplot.Cdf(cdf, xscale='log', color='C0')
thinkplot.Config(title='CDF on log-x scale', ylabel='CDF', **scale)
thinkplot.SubPlot(3)
scale = thinkplot.Cdf(cdf, transform='exponential', color='C0')
thinkplot.Config(title='CCDF on log-y scale', ylabel='log CCDF', **scale)
thinkplot.SubPlot(4)
xs, ys = thinkstats2.NormalProbability(data)
thinkplot.Plot(xs, ys, color='C0')
thinkplot.Config(title='Normal probability plot',
xlabel='random normal', ylabel='data')
thinkplot.SubPlot(5)
scale = thinkplot.Cdf(cdf, transform='pareto', color='C0')
thinkplot.Config(title='CCDF on log-log scale', ylabel='log CCDF', **scale)
thinkplot.SubPlot(6)
scale = thinkplot.Cdf(cdf, transform='weibull', color='C0')
thinkplot.Config(title='CCDF on loglog-y log-x scale',
ylabel='log log CCDF', **scale)
thinkplot.Show(legend=False)
if __name__ == '__main__':
main(*sys.argv)
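# --- Hedged usage note (not part of the original script) ---------------------
# Run as: python <this_file>.py mystery0.dat (any of the mystery*.dat files).
# Interpretation sketch: a straight line in the log-y CCDF panel suggests an
# exponential distribution, in the log-log CCDF panel a Pareto, in the Weibull
# panel a Weibull, and in the normal probability plot a normal distribution.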
|
"""Cache for storing and retrieving data specific to collections.
Supports caching the collection and caching documents within those collections.
"""
import os
import threading
from grow.collections import collection
class CollectionCache(object):
def __init__(self):
self.reset()
self._lock = threading.RLock()
@staticmethod
def generate_cache_key(pod_path, locale):
return '{}::{}'.format(pod_path, locale)
def add_collection(self, col):
self._cache[col.collection_path] = {
'collection': col,
'docs': {},
}
def add_document(self, doc):
col = doc.collection
self.ensure_collection(col)
# NOTE: Using `doc.locale` causes infinite loop since it needs to load
# the fields (which can have circular dependencies).
cache_key = CollectionCache.generate_cache_key(
doc.pod_path, doc.locale_safe)
self._cache[col.collection_path]['docs'][cache_key] = doc
def add_document_locale(self, doc, locale):
"""Force a doc to be saved to a specific locale.
When docs have a default locale that differs from the collection default
it causes issues since the cache does not know that None locale is not correct.
This allows for the None locale to be forced to the default locale from the
doc.
"""
col = doc.collection
self.ensure_collection(col)
cache_key = CollectionCache.generate_cache_key(
doc.pod_path, locale)
self._cache[col.collection_path]['docs'][cache_key] = doc
def remove_by_path(self, path):
"""Removes the collection or document based on the path."""
if path.startswith(collection.Collection.CONTENT_PATH):
if path.endswith(
'/{}'.format(collection.Collection.BLUEPRINT_PATH)):
# If this is a blueprint then remove the entire collection.
col_path = path[len(collection.Collection.CONTENT_PATH):]
# Get just the directory.
col_path = os.path.split(col_path)[0]
collection_path = col_path[1:] # Remove /
with self._lock:
if collection_path in self._cache:
del self._cache[collection_path]
else:
# Search for an existing collection path.
col_path = path[len(collection.Collection.CONTENT_PATH):]
col_path = os.path.split(col_path)[0]
while col_path != os.sep:
collection_path = col_path[1:]
with self._lock:
if collection_path in self._cache:
# Do a 'wildcard' match on the path to remove all
# locales.
generic_key = CollectionCache.generate_cache_key(
path, '')
                            for key in list(self._cache[collection_path]['docs'].keys()):
if key.startswith(generic_key):
del self._cache[
collection_path]['docs'][key]
return
col_path = os.path.split(col_path)[0]
def remove_collection(self, col):
with self._lock:
if col.collection_path in self._cache:
del self._cache[col.collection_path]
def remove_document(self, doc):
col = doc.collection
with self._lock:
if col.collection_path in self._cache:
cache_key = CollectionCache.generate_cache_key(
doc.pod_path, doc.locale_safe)
if cache_key in self._cache[col.collection_path]['docs']:
del self._cache[col.collection_path]['docs'][cache_key]
def remove_document_locale(self, doc, locale):
col = doc.collection
with self._lock:
if col.collection_path in self._cache:
cache_key = CollectionCache.generate_cache_key(
doc.pod_path, locale)
if cache_key in self._cache[col.collection_path]['docs']:
del self._cache[col.collection_path]['docs'][cache_key]
def remove_document_locales(self, doc):
col = doc.collection
with self._lock:
if col.collection_path in self._cache:
doc_cache_key = CollectionCache.generate_cache_key(
doc.pod_path, '')
invalid_keys = []
for cache_key in self._cache[col.collection_path]['docs'].keys():
if cache_key.startswith(doc_cache_key):
invalid_keys.append(cache_key)
for cache_key in invalid_keys:
del self._cache[col.collection_path]['docs'][cache_key]
def ensure_collection(self, col):
with self._lock:
if col.collection_path not in self._cache:
self.add_collection(col)
def get_collection(self, collection_path):
collection_path = collection.Collection.clean_collection_path(
collection_path)
if collection_path in self._cache:
return self._cache[collection_path]['collection']
return None
def get_document(self, col, pod_path, locale):
if col.collection_path in self._cache:
cache_key = CollectionCache.generate_cache_key(pod_path, locale)
return self._cache[col.collection_path]['docs'].get(
cache_key, None)
return None
def reset(self):
self._cache = {}
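# --- Hedged example (not part of the original module) ------------------------
# A tiny in-memory walkthrough of the cache using stand-in objects instead of
# real grow collections/documents (only the attributes the cache touches are
# provided). Purely illustrative; real callers pass grow objects.
if __name__ == "__main__":
    from types import SimpleNamespace

    cache = CollectionCache()
    col = SimpleNamespace(collection_path="pages")
    doc = SimpleNamespace(collection=col, pod_path="/content/pages/home.yaml",
                          locale_safe="en_US")
    cache.add_collection(col)
    cache.add_document(doc)
    print(cache.get_document(col, "/content/pages/home.yaml", "en_US"))  # the stub doc
    cache.remove_document(doc)
    print(cache.get_document(col, "/content/pages/home.yaml", "en_US"))  # None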
|
import pptree
from mcts import parameters as p
from mcts.smiles import SMILES
class Node:
"""
Representation of a node in the MCTS tree
Each node own its SMILES, number of visits and score
"""
def __init__(self, smiles=SMILES(), parent=None):
"""
Initialisation of a new node
:param smiles: the state of the SMILES on the node
:type smiles: SMILES
:param parent: parent of the node
:type parent: Node
"""
self.smiles = smiles
self.parent = parent
self.children = []
self.visits = 0
self.score = 0.0
def new_child(self, child_smile):
"""
Add a new child to the current node
:param child_smile: the SMILES used to create the new node
:type child_smile: SMILES
:return: None
"""
self.children.append(Node(child_smile, self))
def update(self, reward):
"""
Recursive function to update a Node
:param reward: reward to update the score of the Node
:type reward: float
:return: None
"""
self.score += reward
self.visits += 1
if self.parent:
self.parent.update(reward)
def fully_expanded(self):
"""
Return True if the number of children is equal to the size of the vocabulary (minus 2 for \\n and &)
or if the SMILES of the node is greater than 80 (maximum for the RNN)
:return: True if the node is terminal in the tree else False
"""
return (len(self.children) == (len(p.tokens) - 2)) or (len(self.smiles.element) > 80)
def out_pptree(self, parent=None):
"""
Recursive function
Return instance of pptree module to print the tree
:param parent: parent of the tree (for the recursive part of the function)
:type parent: Node
:return: instance of pptree.Node
"""
name = repr(self)
if parent:
current = pptree.Node(name, parent)
else:
current = pptree.Node(name)
for c in self.children:
c.out_pptree(current)
return current
def get_height(self):
"""
Return the height of the node
:return: the height of the node
"""
if not self.children:
return 1
else:
return 1 + max([n.get_height() for n in self.children])
def get_size(self):
"""
Return the size of the Node
:return: the size of the Node
"""
if not self.children:
return 1
else:
return 1 + sum([n.get_size() for n in self.children])
def echo(self):
"""
Print the current tree
:return: None
"""
pptree.print_tree(self.out_pptree())
print("Size of the tree : %d" % self.get_size())
print("Height of the tree : %d" % self.get_height())
def __eq__(self, other):
"""
Equality between two nodes
:param other: an other node
:type other: Node
:return: True if other is the same node
"""
return str(self.smiles) == str(other.smiles)
def __repr__(self):
"""
Representation of a Node
:return: string representing the node
"""
return str(self.smiles) + " " + str(int(self.visits)) + " " + str(round(self.score, 2))
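# --- Hedged example (not part of the original module) ------------------------
# A small walkthrough of the tree bookkeeping using plain strings in place of
# SMILES objects (str() is all that __repr__ needs here); fully_expanded is not
# exercised because it depends on the real vocabulary in parameters.
if __name__ == "__main__":
    root = Node(smiles="C")
    root.new_child("CC")
    root.new_child("CO")
    root.children[0].update(1.0)  # back-propagates to the root as well
    root.echo()                   # prints the tree plus its size and height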
|
from collections import OrderedDict
from typing import Union, Callable, List, Dict, Any, Optional, Tuple
from pathlib import Path
from movado.controller import Controller, is_call_exact
from movado.estimator import Estimator
from movado.mab_handler import MabHandler
import numpy as np
import itertools
import random
import scipy.special
import asyncio
import functools
import concurrent.futures
import multiprocessing
# These imports are used to populate the symbol table for class retrieval
# noinspection PyUnresolvedReferences
from movado.mab_controller import MabController # pylint: disable=unused-import
# noinspection PyUnresolvedReferences
from movado.distance_controller import (
DistanceController,
) # pylint: disable=unused-import
class VotingController(Controller):
def __init__(
self,
controller, # class reference => assumption of homogeneous controllers with different parameters
estimator: Estimator,
exact_fitness: Callable[[List[float]], List[float]],
voters: int,
params: "OrderedDict[str, List[Union[int, float, str]]]",
self_exact: Optional[object] = None,
is_point_in_context: bool = False,
mab_weight: bool = True,
debug=False,
):
super(VotingController, self).__init__(
estimator=estimator,
exact_fitness=exact_fitness,
debug=debug,
self_exact=self_exact,
)
self.__is_point_in_context = is_point_in_context
self.__debug = debug
self.__is_soft: bool = False
self.__controllers: List[Controller] = self.__get_controllers(
params,
controller,
estimator,
exact_fitness,
voters,
self_exact=self_exact,
mab_weight=mab_weight,
debug=debug,
)
self.__last_winners: List[Controller] = []
self.__last_decision: int = -1
self.initialize_debug()
self.__params = params
random.seed(0)
def initialize_debug(self):
Path(self._controller_debug).open("a").write(
"Majority_Model_Parameters, Majority_Size, Point,"
+ "Exec_Time, Error, Exact_Estimated_Calls, Mean Weight, Estimation\n"
)
def learn(
self,
is_exact: bool,
point: List[float] = None,
exec_time: float = None,
mab: Optional[Tuple[MabHandler, Union[int, float]]] = None,
mab_forced_probability: Optional[float] = None,
mab_forced_action: Optional[Union[int, float]] = None,
mab_weight: Optional[MabHandler] = None,
mab_weight_forced_probability: Optional[float] = None,
mab_weight_forced_action: Optional[Union[int, float]] = None,
is_point_in_context: bool = True,
):
loop = asyncio.get_event_loop()
executor = concurrent.futures.ThreadPoolExecutor(
max_workers=int(multiprocessing.cpu_count() / 2)
)
loop.set_default_executor(executor)
for controller in self.__controllers:
if controller in self.__last_winners:
loop.run_in_executor(
None,
functools.partial(
controller.learn,
is_exact=is_exact,
point=point,
exec_time=exec_time,
mab=(controller.get_mab(), self.__last_decision),
mab_forced_probability=None,
mab_weight=controller.get_weight_mab(),
mab_weight_forced_probability=None,
is_point_in_context=is_point_in_context,
),
)
else:
loop.run_in_executor(
None,
functools.partial(
controller.learn,
is_exact=is_exact,
point=point,
exec_time=exec_time,
mab=(controller.get_mab(), self.__last_decision),
mab_forced_probability=1,
mab_forced_action=(
float(
np.mean(
[
winner.get_mab().get_last_action()
for winner in self.__last_winners
]
)
)
if not self.__is_soft
else self.__last_decision
),
mab_weight=controller.get_weight_mab(),
mab_weight_forced_probability=None,
mab_weight_forced_action=None,
is_point_in_context=is_point_in_context,
),
)
executor.shutdown(wait=True)
def __get_controllers(
self,
params: "OrderedDict[str, List[Union[int, float, str]]]",
controller,
estimator: Estimator,
exact_fitness: Callable[[List[float]], List[float]],
voters: int,
self_exact: Optional[object],
mab_weight: bool,
debug: bool,
) -> List[Controller]:
# TODO random sample indices, not controllers to save memory
controllers: List[Controller] = []
controller_class = controller
if not controller_class:
raise Exception("Controller '" + str(controller) + "' was not found")
for current_vals in random.sample(
list(itertools.product(*list(params.values()))),
voters,
):
current_params = {k: v for k, v in zip(params.keys(), current_vals)}
controllers.append(
controller_class(
estimator=estimator,
exact_fitness=exact_fitness,
self_exact=self_exact,
debug=debug,
skip_debug_initialization=True,
mab_weight=mab_weight,
**current_params
)
)
if type(controllers[0]) is MabController:
self.__is_soft = True
mab: MabHandler = controllers[0].get_mab()
mab_weight: MabHandler = controllers[0].get_weight_mab()
if mab:
mab.initialize_debug()
if mab_weight:
mab_weight.initialize_debug()
return list(random.sample(controllers, voters))
def __compute_weight_vector(self):
return scipy.special.softmax(
[-contr.get_mab().get_mean_cost() for contr in self.__controllers]
)
def compute_objective(
self, point: List[int], decision_only: bool = False
) -> List[float]:
decisions = []
loop = asyncio.get_event_loop()
executor = concurrent.futures.ThreadPoolExecutor(
max_workers=int(multiprocessing.cpu_count() / 2)
)
loop.set_default_executor(executor)
if self.__is_soft:
for controller in self.__controllers:
loop.run_in_executor(
None,
functools.partial(
decisions.append,
controller.compute_objective(point, probability=True),
),
)
# decisions.append(controller.compute_objective(point, probability=True))
executor.shutdown(wait=True)
decision = 1 - np.average([d[1] for d in decisions])
else:
for controller in self.__controllers:
loop.run_in_executor(
None,
functools.partial(
decisions.append,
controller.compute_objective(point, decision_only=True),
),
)
# decisions.append(
# controller.compute_objective(point, decision_only=True)
# )
executor.shutdown(wait=True)
decision = np.average(decisions, weights=self.__compute_weight_vector())
if decision >= 0.5 or self._estimator.get_error() == 0.0:
self.__last_decision = 1
if self.__is_soft:
self.__last_winners = [
controller_decision[0]
for controller_decision in zip(self.__controllers, decisions)
if controller_decision[1][0] == 1
]
else:
self.__last_winners = [
controller_decision[0]
for controller_decision in zip(self.__controllers, decisions)
if controller_decision[1] == 1
]
out, exec_time = self._compute_exact(
point, is_point_in_context=self.__is_point_in_context
)
else:
self.__last_decision = 0
if self.__is_soft:
self.__last_winners = [
controller_decision[0]
for controller_decision in zip(self.__controllers, decisions)
if controller_decision[1][0] == 0
]
else:
self.__last_winners = [
controller_decision[0]
for controller_decision in zip(self.__controllers, decisions)
if controller_decision[1] == 0
]
out, exec_time = self._compute_estimated(
point,
is_point_in_context=self.__is_point_in_context,
)
if self._debug:
params = (
self.__last_winners[0].get_parameters()
if self.__last_winners
else ["Model_Parameters", "Empty Hard Majority"]
)
self.write_debug(
{
params[0]: params[1],
"Majority_Size": len(self.__last_winners),
"Point": point,
"Exec_Time": exec_time,
"Error": self._estimator.get_error(),
"Exact_Estimated_Calls": [
is_call_exact.count(True),
is_call_exact.count(False),
],
"Mean Weight": np.mean(
[
ctrl.get_weight_mab().get_last_action()
for ctrl in self.__controllers
]
),
"Estimation": int(not self.__last_decision),
}
)
return out
def write_debug(self, debug_info: Dict[str, Any]):
Path(self._controller_debug).open("a").write(
str(debug_info["Model_Parameters"])
+ ", "
+ str(debug_info["Majority_Size"])
+ ", "
+ str(debug_info["Point"])
+ ", "
+ str(debug_info["Exec_Time"])
+ ", "
+ str(debug_info["Error"])
+ ", "
+ str(debug_info["Exact_Estimated_Calls"])
+ ", "
+ str(debug_info["Mean Weight"])
+ ", "
+ str(debug_info["Estimation"])
+ "\n"
)
def get_mab(self):
raise Exception("'get_mab': Unsupported method in voting controller")
def get_weight_mab(self):
raise Exception("'get_weight_mab': Unsupported method in voting controller")
def get_parameters(self):
return self.__params
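# Hedged sketch (not part of the original class): the hard-vote branch of
# compute_objective weights each controller's binary decision by a softmax over
# the negated mean MAB costs, so cheaper controllers count more. The cost and
# vote values below are made up purely to illustrate the arithmetic.
if __name__ == "__main__":
    import numpy as _np
    import scipy.special as _special
    mean_costs = [0.2, 0.5, 0.9]  # assumed mean cost of each controller's MAB
    weights = _special.softmax([-c for c in mean_costs])
    votes = [1, 0, 1]  # assumed binary decisions of the controllers
    print(_np.average(votes, weights=weights))  # weighted score; >= 0.5 means "exact"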
|
# STEP 4: use trained FQG model to generate new QG data using augmented sentences
import os
import argparse
import logging
from polyaxon_client.tracking import get_outputs_path
# Set up the logger
logging.basicConfig(format='%(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("data_dir", type=str, help="Directory containing the data for the ACS-QG project")
args = parser.parse_args()
data_dir = args.data_dir.rstrip("/") + "/"
OUT_DIR = get_outputs_path().rstrip("/") + "/"
# debug
processed_path = data_dir + "processed/SQuAD2.0/"
data_file_prefix = "train"
st_idx = str(0)
ed_idx = str(50000)
os.system('CUDA_VISIBLE_DEVICES=0 PYTHONIOENCODING=utf-8 python3 QG_gpt2_generate.py \
--model_type gpt2 \
--model_name_or_path ' + data_dir + 'output/QG/gpt2_question_generation/4epochs/2batchsize/ \
--filename "' + processed_path + data_file_prefix + '.sentences.augmented.' + st_idx + '_' + ed_idx + '.pkl" \
--filecache "' + OUT_DIR + data_file_prefix + '.sentences.augmented.' + st_idx + '_' + ed_idx + '.cache.qg.gpt2.pth" \
--data_type augmented_sents \
--output_file "' + OUT_DIR + data_file_prefix + '.qa.' + st_idx + '_' + ed_idx + '.qg.generated.gpt2.4e8b.debug.json" \
--debug')
# squad data
processed_path = data_dir + "processed/SQuAD2.0/"
data_file_prefix = "train"
st_idx = str(0)
ed_idx = str(50000)
os.system('CUDA_VISIBLE_DEVICES=0 PYTHONIOENCODING=utf-8 python3 QG_gpt2_generate.py \
--model_type gpt2 \
--model_name_or_path ' + data_dir + 'output/QG/gpt2_question_generation/4epochs/2batchsize/ \
--filename "' + processed_path + data_file_prefix + '.sentences.augmented.' + st_idx + '_' + ed_idx + '.pkl" \
--filecache "' + OUT_DIR + data_file_prefix + '.sentences.augmented.' + st_idx + '_' + ed_idx + '.cache.qg.gpt2.pth" \
--data_type augmented_sents \
--output_file "' + OUT_DIR + data_file_prefix + '.qa.' + st_idx + '_' + ed_idx + '.qg.generated.gpt2.json"')
processed_path = data_dir + "processed/SQuAD2.0/"
data_file_prefix = "train"
st_idx = str(50000)
ed_idx = str(92210)
os.system('CUDA_VISIBLE_DEVICES=0 PYTHONIOENCODING=utf-8 python3 QG_gpt2_generate.py \
--model_type gpt2 \
--model_name_or_path ' + data_dir + 'output/QG/gpt2_question_generation/4epochs/2batchsize/ \
--filename "' + processed_path + data_file_prefix + '.sentences.augmented.' + st_idx + '_' + ed_idx + '.pkl" \
--filecache "' + OUT_DIR + data_file_prefix + '.sentences.augmented.' + st_idx + '_' + ed_idx + '.cache.qg.gpt2.pth" \
--data_type augmented_sents \
--output_file "' + OUT_DIR + data_file_prefix + '.qa.' + st_idx + '_' + ed_idx + '.qg.generated.gpt2.json"')
# wiki data
    # generate QA pairs for every 50k-sentence chunk, from sentence 0 up to 1,000,000
    processed_path = data_dir + "processed/Wiki10000/"
    data_file_prefix = "wiki10000"
    for chunk_start in range(0, 1000000, 50000):
        st_idx = str(chunk_start)
        ed_idx = str(chunk_start + 50000)
        os.system('CUDA_VISIBLE_DEVICES=0 PYTHONIOENCODING=utf-8 python3 QG_gpt2_generate.py \
            --model_type gpt2 \
            --model_name_or_path ' + data_dir + 'output/QG/gpt2_question_generation/4epochs/2batchsize/ \
            --filename "' + processed_path + data_file_prefix + '.sentences.augmented.' + st_idx + '_' + ed_idx + '.pkl" \
            --filecache "' + OUT_DIR + data_file_prefix + '.sentences.augmented.' + st_idx + '_' + ed_idx + '.cache.qg.gpt2.pth" \
            --data_type augmented_sents \
            --output_file "' + OUT_DIR + data_file_prefix + '.qa.' + st_idx + '_' + ed_idx + '.qg.generated.gpt2.json"')
|
from setuptools import setup
setup(
name='django-extra-views',
version='0.2.4',
url='https://github.com/AndrewIngram/django-extra-views',
description="Extra class-based views for Django",
long_description=open('README.rst', 'r').read(),
license="MIT",
author="Andrew Ingram",
author_email="andy@andrewingram.net",
packages=['extra_views'],
package_dir={'': '.'},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Programming Language :: Python']
)
|
import uuid
from typing import Any
import pydantic
from aiohttp import web
import xjsonrpc.server.specs.extractors.docstring
import xjsonrpc.server.specs.extractors.pydantic
from xjsonrpc.server.integration import aiohttp as integration
from xjsonrpc.server.validators import pydantic as validators
from xjsonrpc.server.specs import extractors, openapi as specs
user_methods = xjsonrpc.server.MethodRegistry()
post_methods = xjsonrpc.server.MethodRegistry()
validator = validators.PydanticValidator()
credentials = {"admin": "admin"}
class JSONEncoder(xjsonrpc.JSONEncoder):
def default(self, o: Any) -> Any:
if isinstance(o, pydantic.BaseModel):
return o.dict()
if isinstance(o, uuid.UUID):
return str(o)
return super().default(o)
class UserIn(pydantic.BaseModel):
"""
User registration data.
"""
name: str
surname: str
age: int
class UserOut(UserIn):
"""
Registered user data.
"""
id: uuid.UUID
class AlreadyExistsError(xjsonrpc.exc.JsonRpcError):
"""
User already registered error.
"""
code = 2001
message = "user already exists"
@specs.annotate(
tags=['users'],
errors=[AlreadyExistsError],
examples=[
specs.MethodExample(
summary="Simple example",
params=dict(
user={
'name': 'John',
'surname': 'Doe',
'age': 25,
},
),
result={
'id': 'c47726c6-a232-45f1-944f-60b98966ff1b',
'name': 'John',
'surname': 'Doe',
'age': 25,
},
),
],
)
@user_methods.add(context='request')
@validator.validate
def add_user(request: web.Request, user: UserIn) -> UserOut:
"""
Creates a user.
:param request: http request
:param object user: user data
:return object: registered user
:raise AlreadyExistsError: user already exists
"""
user_id = uuid.uuid4().hex
request.config_dict['users'][user_id] = user
return UserOut(id=user_id, **user.dict())
class PostIn(pydantic.BaseModel):
"""
    Post creation data.
"""
title: str
content: str
class PostOut(PostIn):
"""
    Created post data.
"""
id: uuid.UUID
@specs.annotate(
tags=['posts'],
errors=[AlreadyExistsError],
examples=[
specs.MethodExample(
summary="Simple example",
params=dict(
post={
'title': 'Super post',
'content': 'My first post',
},
),
result={
'id': 'c47726c6-a232-45f1-944f-60b98966ff1b',
'title': 'Super post',
'content': 'My first post',
},
),
],
)
@post_methods.add(context='request')
@validator.validate
def add_post(request: web.Request, post: PostIn) -> PostOut:
"""
Creates a post.
:param request: http request
:param object post: post data
:return object: created post
"""
post_id = uuid.uuid4().hex
request.config_dict['posts'][post_id] = post
return PostOut(id=post_id, **post.dict())
jsonrpc_app = integration.Application(
'/api/v1',
json_encoder=JSONEncoder,
spec=specs.OpenAPI(
info=specs.Info(version="1.0.0", title="User storage"),
servers=[
specs.Server(
url='http://127.0.0.1:8080',
),
],
security_schemes=dict(
basicAuth=specs.SecurityScheme(
type=specs.SecuritySchemeType.HTTP,
scheme='basic',
),
),
security=[
dict(basicAuth=[]),
],
schema_extractors=[
extractors.docstring.DocstringSchemaExtractor(),
extractors.pydantic.PydanticSchemaExtractor(),
],
ui=specs.SwaggerUI(),
# ui=specs.RapiDoc(),
# ui=specs.ReDoc(),
),
)
jsonrpc_app.app['users'] = {}
jsonrpc_app.app['posts'] = {}
jsonrpc_app.add_endpoint('/users', json_encoder=JSONEncoder).add_methods(user_methods)
jsonrpc_app.add_endpoint('/posts', json_encoder=JSONEncoder).add_methods(post_methods)
if __name__ == "__main__":
web.run_app(jsonrpc_app.app, host='localhost', port=8080)
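# Hedged request example (the exact route layout is an assumption): with the
# application mounted at '/api/v1' and the user methods exposed on the '/users'
# endpoint, adding a user is a plain JSON-RPC 2.0 POST, e.g.
#
#   POST http://127.0.0.1:8080/api/v1/users
#   {"jsonrpc": "2.0", "id": 1, "method": "add_user",
#    "params": {"user": {"name": "John", "surname": "Doe", "age": 25}}}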
|
"""
python-aws-dataclasses
"""
__version__="0.4.5"
from aws_dataclasses.sns_event import SnsEvent
from aws_dataclasses.alexa_skill_event import AlexaSkillEvent
from aws_dataclasses.s3_event import S3Event
from aws_dataclasses.cf_event import CloudFrontEvent
from aws_dataclasses.http_proxy_event import ApiGwProxyEvent
|
""" Generate some fragments. """
from typing import TextIO, List
from pathlib import Path
import random
def read_sequence(input_io: TextIO) -> str:
""" Read sequence data. """
return "".join([s.strip() for s in input_io.readlines()])
def genfrags(sequence: str, number: int, size: int) -> List[str]:
"""
Generate `number` fragments all of equal length: `size`, from `sequence`.
Ensure that there is a fragment covering the very beginning
and end of `sequence`.
"""
random.seed()
starts = (random.randint(0, len(sequence) - size) for _ in range(number))
return [sequence[start : start + size] for start in starts] + [
sequence[:size],
sequence[len(sequence) - size :],
]
def generate(filename: Path, number: int, size: int) -> List[str]:
""" Given an input file containing a sequence, generate random fragments. """
with open(filename) as seq_file:
return genfrags(read_sequence(seq_file), number, size)
def write_fragments(output: TextIO, fragments: List[str]) -> None:
" Write fragments to a file. "
output.write("\n".join(fragments))
|
import requests
from bs4 import BeautifulSoup as bs
from get_info_box import scrape_info_box
def scrape_disney_films():
movie_info_list = []
base_path = 'https://en.wikipedia.org'
r = requests.get('https://en.wikipedia.org/wiki/List_of_Walt_Disney_Pictures_films')
soup = bs(r.content, features='html.parser')
movies = soup.select('.wikitable.sortable i a')
for index, movie in enumerate(movies):
try:
relative_path = movie['href']
title = movie['title']
movie_info_list.append(scrape_info_box(base_path + relative_path))
except Exception as e:
print(e)
return movie_info_list
|
import httpx
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from settings import Config
class WeatherSelenium:
def __init__(self):
chrome_options = Options()
chrome_options.headless = True
self.driver = webdriver.Chrome(options=chrome_options)
self.geokey = Config.GEO_KEY
def screenshot(self, url: str) -> bytes:
self.driver.get(url)
elem_summary = self.driver.find_element_by_xpath('//div[@class="c-city-weather-current__bg"]')
summary = elem_summary.screenshot_as_png
return summary
def query_city_id(self, location: str, adm: str) -> str:
url = "https://geoapi.qweather.com/v2/city/lookup"
res = httpx.get(url, params=dict(key=self.geokey, location=location, adm=adm))
assert res.status_code == 200, "查询city id 失败"
j = res.json()
assert j["code"] == "200", "查询city id 失败"
return j["location"][0]["id"]
def query_weather(self, city_id: str) -> str:
url = "https://devapi.qweather.com/v7/weather/now"
res = httpx.get(url, params=dict(key=self.geokey, location=city_id))
assert res.status_code == 200, "查询天气失败"
j = res.json()
assert j["code"] == "200", "查询天气失败"
return j["fxLink"]
def craw_weather(self, location: str, adm: str = None) -> bytes:
        city_id = self.query_city_id(location, adm)
        url = self.query_weather(city_id)
return self.screenshot(url)
weather_selenium = WeatherSelenium()
|
from typing import OrderedDict
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from modeling import BertForSequenceClassification, BertConfig
from transformers import AutoTokenizer
import datasets
from tqdm import tqdm
from time import time
from quantize import quantize
from quantized_modeling import BertQuantizedEncoder
import random
from test_utils import *
def main():
config = BertConfig.from_json_file(config_file)
tokenizer = AutoTokenizer.from_pretrained(
tokenizer_path, config=config)
rawdata = datasets.load_dataset("glue", "mrpc")["train"]
loader = DataLoader(rawdata, batch_size=n_samples, shuffle=True)
if no_cuda:
device = torch.device("cpu")
elif local_rank == -1:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
else:
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
# set dropout prob to 0
# config.hidden_dropout_prob = 0
# config.attention_probs_dropout_prob = 0
# get each encoder output
config.output_all_encoded_layers = True
state_dict = torch.load(init_checkpoint)
if task_name == "mrpc" or task_name == "qnli":
orig = BertForSequenceClassification(config, 2)
quant = BertForSequenceClassification(config, 2)
elif task_name == "mnli":
orig = BertForSequenceClassification(config, 3)
quant = BertForSequenceClassification(config, 3)
apply_quantization(orig, config, state_dict)
apply_quantization(quant, config, state_dict, quantization_schemes)
orig.to(device)
quant.to(device)
print(quantization_schemes)
orig.eval()
quant.eval()
if fp16:
orig.half()
quant.half()
with torch.no_grad():
a = time()
for data in loader:
processed_data = process_glue_mrpc_data(data, task_name, tokenizer, device)
for i in range(1):
eval_diff(orig, quant, processed_data, i)
break
print("total time:", time() - a)
task_name = "mrpc"
model_dir = f"/workspace/ft-bert-pyt/model/bert-base-cased-{task_name}/"
config_file = model_dir + "config.json"
init_checkpoint = model_dir + "pytorch_model.bin"
vocab_file = model_dir + "vocab.txt"
tokenizer_config_path = model_dir + "tokenizer_config.json"
tokenizer_path = model_dir
local_rank = 0
n_samples = 100
do_lower_case = False
no_cuda = False
fp16 = False
quantization_schemes = [random.randint(0, 3) for i in range(12)]
quantization_schemes = [3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# quantization_schemes = [0, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0]
pos = 11
if __name__ == "__main__":
main()
|
# Licensed to the White Turing under one or more
# contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any, Dict, Union
from .commands import _PATH, Commands
from .exceptions import DeviceConnectionException
from .utils import free_port, is_connectable, merge_dict
class BaseService(Commands):
'''Object that manages the starting and stopping of the AndroidDriver.'''
def __init__(self, executable: _PATH = 'default', port: Union[int, str] = 5037, env: Dict = None) -> None:
super(BaseService, self).__init__(executable)
self.port = port
if self.port == 0:
self.port = free_port()
self.options = {'env': env}
@property
def service_tcp(self) -> str:
'''Gets the TCP of the Service.'''
return f"tcp://localhost:{self.port}"
    def service_args(self) -> list:
        raise NotImplementedError(
            "This method needs to be implemented in a sub class")
def _build_cmd(self, args: Union[list, tuple]) -> str:
'''Build command.'''
cmd = [self.path]
cmd.extend(self.service_args())
cmd.extend(args)
return cmd
def _execute(self, *args: str, **kwargs: Any) -> tuple:
'''Execute command.'''
return self.execute(args=args, options=merge_dict(self.options, kwargs)).communicate()
def start(self) -> None:
'''Starts the Service.'''
self._execute('start-server')
def stop(self) -> None:
'''Stops the service.'''
self._execute('kill-server')
def restart(self) -> None:
'''Restart the server if it is running.'''
self.stop()
self.start()
def version(self) -> str:
'''Show the version number of Android Debug Bridge.'''
output, _ = self._execute('version')
return output.splitlines()[0].split()[-1]
def devices(self) -> list:
'''List connected devices.'''
output, _ = self._execute('devices')
return output.split()[4::2]
def devices_l(self) -> Dict:
'''List connected devices (-l for long output).'''
output, _ = self._execute('devices', '-l')
devices = output.split()[4::6]
models = output.split()[7::6]
return dict(zip(devices, models))
def connect(self, host: str = '192.168.0.3', port: Union[int, str] = 5555) -> None:
'''Connect to a device via TCP/IP directly.'''
self.device_sn = f'{host}:{port}'
if not is_connectable(host, port):
raise ConnectionError(f'Cannot connect to {self.device_sn}.')
self._execute('connect', self.device_sn)
def disconnect(self, host: str = '192.168.0.3', port: Union[int, str] = 5555) -> None:
'''Disconnect from given TCP/IP device [default port=5555].'''
self.device_sn = None
self._execute('disconnect', f'{host}:{port}')
def disconnect_all(self) -> None:
'''Disconnect all.'''
self.device_sn = None
self._execute('disconnect')
def get_state(self) -> str:
'''offline | bootloader | device'''
output, error = self._execute('get-state')
if error:
raise DeviceConnectionException(error.split(':', 1)[-1].strip())
return output.strip()
class Service(BaseService):
'''Object that manages the starting and stopping of the AndroidDriver.'''
def __init__(self, executable_path: _PATH = 'default', port: Union[int, str] = 5037, env: Dict = None, service_args: Union[list, tuple] = None) -> None:
'''Creates a new instance of the Service.
Args:
executable_path: Path to the AndroidDriver.
port: Port the service is running on.
env: Environment variables.
service_args: List of args to pass to the androiddriver service.
'''
self._service_args = service_args or []
super(Service, self).__init__(executable_path, port=port, env=env)
    def service_args(self) -> list:
'''Parameters when starting the service.'''
return ['-P', str(self.port)] + self._service_args
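# Hedged usage sketch (this module uses package-relative imports, so import it
# rather than running it directly); assumes an adb binary is resolvable via
# executable_path='default':
#
#   service = Service()
#   service.start()
#   print("adb version:", service.version())
#   print("connected devices:", service.devices())
#   service.stop()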
|
# coding: utf-8
from __future__ import absolute_import, unicode_literals
from django.core.urlresolvers import reverse
from django.views.generic import ListView, RedirectView
from haystack.forms import HighlightedSearchForm
from haystack.views import SearchView
from .models import Document, ParlerDocument
class SearchForm(HighlightedSearchForm):
pass
class DocumentView(ListView):
model = Document
paginate_by = 20
class ParlerView(ListView):
paginate_by = 20
def get_queryset(self):
return ParlerDocument.objects.prefetch_related('translations')
class Search(SearchView):
form = SearchForm
model = Document
paginate_by = 20
class LanguageRedirectView(RedirectView):
def get_redirect_url(self):
return reverse('home')
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.views.decorators.csrf import csrf_exempt
from django_todo.apps.core.views import CurrentTaskView, CompleteTaskView
urlpatterns = patterns(
'',
url(r'^$', CurrentTaskView.as_view(), name='home'),
url(r'create/$', CurrentTaskView.as_view(), name='create_task'),
url(r'complete/(?P<id>\d+)/$', csrf_exempt(CompleteTaskView.as_view()), name='complete_task'),
(r'^accounts/login/$', 'django.contrib.auth.views.login'),
(r'^accounts/logout/$', 'django.contrib.auth.views.logout'),
url(r'^admin/', include(admin.site.urls)),
)
|
import os
import subprocess
import menupy
DataDir = '/home/alles/.MyTelegram' # Where accounts will be located
Binary = '/usr/bin/telegram-desktop' # Where is Telegram binary
def CheckPatches():
    if not os.path.exists(DataDir):
        try:
            os.mkdir(DataDir)
        except OSError:
            exit("Can't create %s" % DataDir)
        else:
            print("Directory %s created!" % DataDir)
    if not os.path.exists(Binary):
exit("No Telegram binary!")
def MainMenu(msg):
SelectMenu = menupy.OptionMenu(str(msg), title_color="cyan")
SelectMenu.add_option("Login to an existing account", color="green", ret="exist")
SelectMenu.add_option("Login to a new account", color="green", ret="new")
SelectMenu.add_option("Exit", color="red", ret="exit")
result = SelectMenu.run()
if result == "exit":
exit("Have a nice day!")
else:
return result
def SelectAccount():
AccountsDirs = os.listdir(DataDir)
SelectMenu = menupy.OptionMenu("Select Telegram account:", title_color="cyan")
for Dir in AccountsDirs:
SelectMenu.add_option(Dir, color="green", ret=str(Dir))
SelectMenu.add_option("To main menu", color="red", ret="main")
result = SelectMenu.run()
if result == "main":
main("By @Allespro, Thanks to @luxunator for this wonderful menu\nSelect option:")
else:
return result
def NewAccount():
AccountName = str(input("Input your account name: "))
if os.path.exists(DataDir+"/"+AccountName):
main("Account %s is already created!" % AccountName)
else:
try:
os.mkdir(DataDir+"/"+AccountName)
except OSError:
main("Can't create %s" % AccountName)
else:
main("Directory for account %s created! Now you can login!" % AccountName)
def main(mainmsg):
chs = MainMenu(str(mainmsg))
if chs == "exist":
Account = SelectAccount()
command = str(Binary) + " -many -workdir " + DataDir+ "/" + Account
subprocess.Popen(command, shell=True)
exit("\n\nFollow me on Github\nhttps://github.com/Allespro\n\n")
elif chs == "new":
NewAccount()
else:
exit('HOW YOU GET THAT RETURN???')
if __name__ == '__main__':
CheckPatches()
main("By @Allespro, Thanks to @luxunator for this wonderful menu\nSelect option:")
|
"""A sqlite2 store"""
import sqlite3
from typing import Any, List, Mapping, MutableMapping, Tuple
import aiosqlite
from ..types import Session, Store
CREATE_SEQNUM_TABLE_SQL = """
CREATE TABLE IF NOT EXISTS initiator_seqnums
(
sender_comp_id VARCHAR(64) NOT NULL,
target_comp_id VARCHAR(64) NOT NULL,
outgoing_seqnum INT NOT NULL,
incoming_seqnum INT NOT NULL,
PRIMARY KEY (sender_comp_id, target_comp_id)
)
"""
SEQNUM_QUERY = """
SELECT outgoing_seqnum, incoming_seqnum
FROM initiator_seqnums
WHERE sender_comp_id = ? AND target_comp_id = ?
"""
SEQNUM_INSERT = """
INSERT INTO initiator_seqnums(sender_comp_id, target_comp_id, outgoing_seqnum, incoming_seqnum)
VALUES ( ?, ?, ?, ?)
"""
SEQNUM_UPDATE = """
UPDATE initiator_seqnums
SET outgoing_seqnum = ?, incoming_seqnum = ?
WHERE sender_comp_id = ? AND target_comp_id = ?
"""
SEQNUM_UPDATE_OUTGOING = """
UPDATE initiator_seqnums
SET outgoing_seqnum = ?
WHERE sender_comp_id = ? AND target_comp_id = ?
"""
SEQNUM_UPDATE_INCOMING = """
UPDATE initiator_seqnums
SET incoming_seqnum = ?
WHERE sender_comp_id = ? AND target_comp_id = ?
"""
CREATE_MESSAGE_TABLE_SQL = """
CREATE TABLE IF NOT EXISTS initiator_messages
(
sender_comp_id VARCHAR(64) NOT NULL,
target_comp_id VARCHAR(64) NOT NULL,
outgoing_seqnum INT NOT NULL,
incoming_seqnum INT NOT NULL,
message VARCHAR(2048) NOT NULL,
PRIMARY KEY (sender_comp_id, target_comp_id)
)
"""
MESSAGE_INSERT = """
INSERT INTO initiator_messages(sender_comp_id, target_comp_id, outgoing_seqnum, incoming_seqnum, message)
VALUES ( ?, ?, ?, ?, ?)
"""
class SqlSession(Session):
def __init__(
self,
conn_args: List[Any],
conn_kwargs: Mapping[str, Any],
sender_comp_id: str,
target_comp_id: str
) -> None:
self.conn_args = conn_args
self.conn_kwargs = conn_kwargs
conn = sqlite3.connect(*self.conn_args, **self.conn_kwargs)
cursor = conn.cursor()
cursor.execute(SEQNUM_QUERY, (sender_comp_id, target_comp_id))
result = cursor.fetchone()
if result:
self._outgoing_seqnum, self._incoming_seqnum = result
else:
cursor.execute(
SEQNUM_INSERT, (sender_comp_id, target_comp_id, 0, 0))
conn.commit()
self._outgoing_seqnum, self._incoming_seqnum = 0, 0
self._sender_comp_id = sender_comp_id
self._target_comp_id = target_comp_id
@property
def sender_comp_id(self) -> str:
return self._sender_comp_id
@property
def target_comp_id(self) -> str:
return self._target_comp_id
async def get_seqnums(self) -> Tuple[int, int]:
return self._outgoing_seqnum, self._incoming_seqnum
async def set_seqnums(self, outgoing_seqnum: int, incoming_seqnum: int) -> None:
self._outgoing_seqnum, self._incoming_seqnum = outgoing_seqnum, incoming_seqnum
async with aiosqlite.connect(*self.conn_args, **self.conn_kwargs) as db:
await db.execute(
SEQNUM_UPDATE,
(self._outgoing_seqnum, self._incoming_seqnum,
self.sender_comp_id, self.target_comp_id)
)
await db.commit()
async def get_outgoing_seqnum(self) -> int:
return self._outgoing_seqnum
async def set_outgoing_seqnum(self, seqnum: int) -> None:
self._outgoing_seqnum = seqnum
async with aiosqlite.connect(*self.conn_args, **self.conn_kwargs) as db:
await db.execute(
SEQNUM_UPDATE_OUTGOING,
(self._outgoing_seqnum, self.sender_comp_id, self.target_comp_id)
)
await db.commit()
async def get_incoming_seqnum(self) -> int:
return self._incoming_seqnum
async def set_incoming_seqnum(self, seqnum: int) -> None:
self._incoming_seqnum = seqnum
async with aiosqlite.connect(*self.conn_args, **self.conn_kwargs) as db:
await db.execute(
SEQNUM_UPDATE_INCOMING,
(self._incoming_seqnum, self.sender_comp_id, self.target_comp_id)
)
await db.commit()
async def save_message(self, buf: bytes) -> None:
async with aiosqlite.connect(*self.conn_args, **self.conn_kwargs) as db:
message = buf.decode('ascii')
await db.execute(
MESSAGE_INSERT,
(self.sender_comp_id, self.target_comp_id,
self._outgoing_seqnum, self._incoming_seqnum, message)
)
await db.commit()
class SqlStore(Store):
def __init__(
self,
conn_args: List[Any],
conn_kwargs: Mapping[str, Any]
) -> None:
self.conn_args = conn_args
self.conn_kwargs = conn_kwargs
conn = sqlite3.connect(*self.conn_args, **self.conn_kwargs)
cursor = conn.cursor()
cursor.execute(CREATE_SEQNUM_TABLE_SQL)
cursor.execute(CREATE_MESSAGE_TABLE_SQL)
self._sessions: MutableMapping[str, SqlSession] = dict()
def get_session(self, sender_comp_id: str, target_comp_id: str) -> Session:
key = sender_comp_id + '\x01' + target_comp_id
if key in self._sessions:
return self._sessions[key]
session = SqlSession(self.conn_args, self.conn_kwargs,
sender_comp_id, target_comp_id)
self._sessions[key] = session
return session
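# Hedged usage sketch (this module uses package-relative imports, so use it from
# the parent package; the database file name is an arbitrary example):
#
#   store = SqlStore(["fix_store.db"], {})
#   session = store.get_session("INITIATOR", "ACCEPTOR")
#   await session.set_seqnums(1, 1)
#   print(await session.get_seqnums())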
|
import sys
from ctypes import *
from ctypes.util import find_library
libc = cdll.LoadLibrary(find_library("c"))
def sysctl(mib_t, c_type=None):
mib = (c_int * len(mib_t))()
for i, v in enumerate(mib_t):
        mib[i] = v
    if c_type is None:
        size = c_size_t(0)
        libc.sysctl(mib, len(mib), None, byref(size), None, 0)
        buf = create_string_buffer(size.value)
    else:
        buf = c_type()
        size = c_size_t(sizeof(buf))
    st = libc.sysctl(mib, len(mib), byref(buf), byref(size), None, 0)
    if st != 0:
raise OSError('sysctl() returned with error %d' % st)
try:
return buf.value
except AttributeError:
return buf
def sysctlbyname(name, c_type=None):
    if c_type is None:
        size = c_size_t(0)
        libc.sysctlbyname(name, None, byref(size), None, 0)
buf = create_string_buffer(size.value)
else:
buf = c_type()
size = c_size_t(sizeof(buf))
st = libc.sysctlbyname(name, byref(buf), byref(size), None, 0)
if st != 0:
raise OSError('sysctlbyname() returned with error %d' % st)
try:
return buf.value
except AttributeError:
return buf
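# Hedged usage sketch: on macOS / BSD-style systems (Linux libc has no
# sysctlbyname), query the CPU count by name. The name is passed as bytes
# because it goes straight to the C function; "hw.ncpu" is assumed to exist
# on the target platform.
if __name__ == "__main__":
    print(sysctlbyname(b"hw.ncpu", c_type=c_int))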
|
import numpy as np
import srl
from srl.base.define import RenderType
from srl.base.env.base import EnvRun
from srl.runner import sequence
from srl.runner.callbacks import PrintProgress
class TestEnv:
def play_test(
self,
env_name: str,
check_render: bool = True,
check_restore: bool = True,
max_step: int = 0,
print_enable: bool = False,
) -> EnvRun:
        # render and restore are not expected to be checked at the same time
env = self._play_test(env_name, False, check_restore, max_step, print_enable)
if check_render:
env = self._play_test(env_name, check_render, False, max_step, print_enable)
return env
def _is_space_base_instance(self, val):
if type(val) in [int, float, list, np.ndarray]:
return True
return False
def _play_test(
self,
env_name,
check_render,
check_restore,
max_step,
print_enable,
):
env = srl.envs.make(env_name)
assert issubclass(env.__class__, EnvRun)
player_num = env.player_num
assert player_num > 0
# --- reset
env.reset()
assert self._is_space_base_instance(env.state)
for i in env.next_player_indices:
assert 0 <= i < player_num
# --- restore/backup
if check_restore:
dat = env.backup()
env.restore(dat)
assert not env.done
assert env.step_num == 0
# render
if check_render:
for mode in RenderType:
try:
env.render(mode)
except NotImplementedError:
pass
while not env.done:
# --- sample
actions = env.samples()
assert len(actions) == env.player_num
# get_invalid_actions
for idx in range(env.player_num):
invalid_actions = env.get_invalid_actions(idx)
assert isinstance(invalid_actions, list)
for a in invalid_actions:
assert isinstance(a, int)
# --- step
env.step(actions)
assert self._is_space_base_instance(env.state)
assert isinstance(env.done, bool)
assert isinstance(env.info, dict)
for i in env.next_player_indices:
assert 0 <= i < player_num
# uniq check
assert len(env.next_player_indices) == len(list(set(env.next_player_indices)))
assert len(env.step_rewards) == player_num
assert env.step_num > 0
if print_enable:
print(f"step {env.step_num}, actions {actions}, rewards {env.step_rewards}")
# --- restore/backup
if check_restore:
dat = env.backup()
env.restore(dat)
# render
if check_render:
for mode in RenderType:
try:
env.render(mode)
except NotImplementedError:
pass
if max_step > 0 and env.step_num > max_step:
break
env.close()
return env
def player_test(self, env_name: str, player: str) -> EnvRun:
env_config = srl.envs.Config(env_name)
rl_config = srl.rl.random_play.Config()
config = sequence.Config(env_config, rl_config)
env = config.make_env()
config.players = [player] * env.player_num
config.set_play_config(max_episodes=10, callbacks=[PrintProgress()])
sequence.play(config)
return env
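# Hedged usage sketch: run the generic checks above against one registered
# environment. The environment name "Grid" is an assumption; substitute any id
# that srl.envs.make() accepts in your installation.
if __name__ == "__main__":
    tester = TestEnv()
    tester.play_test("Grid", max_step=10, print_enable=True)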
|
from os import stat
import numpy as np
import cv2 # OpenCV
import math
from numpy.linalg.linalg import qr
class Rotations:
def __init__(self):
return
@staticmethod
def rot_mat_2_quat(R):
"""
        Convert a rotation from rotation matrix to quaternion representation. Assumes that the columns of the rotation matrix are orthonormal!
"""
i, j, k = 0, 1, 2
        if R[1,1] > R[0,0]:
            i, j, k = 1, 2, 0
        if R[2,2] > R[i,i]:
            i, j, k = 2, 0, 1
        t = R[i,i] - (R[j,j] + R[k,k]) + 1
        q = np.zeros(4)
q[0] = R[k,j] - R[j,k]
q[i+1] = t
q[j+1] = R[i,j] + R[j,i]
q[k+1] = R[k,i] + R[i,k]
q = np.multiply(q, 0.5 / math.sqrt(t))
return q
@staticmethod
def quat_2_rot_mat(q):
"""
        Convert a rotation from quaternion to rotation matrix representation.
"""
s = np.linalg.norm(q) # s = 1 if the quaternion has unit length
R = np.zeros((3,3))
R[0,0] = 1 - 2 * s * (q[2]**2 + q[3]**2)
R[0,1] = 2 * s * (q[1]*q[2] - q[3]*q[0])
R[0,2] = 2 * s * (q[1]*q[3] + q[2]*q[0])
R[1,0] = 2 * s * (q[1]*q[2] + q[3]*q[0])
R[1,1] = 1 - 2 * s * (q[1]**2 + q[3]**2)
R[1,2] = 2 * s * (q[2]*q[3] - q[1]*q[0])
R[2,0] = 2 * s * (q[1]*q[3] - q[2]*q[0])
R[2,1] = 2 * s * (q[2]*q[3] + q[1]*q[0])
R[2,2] = 1 - 2 * s * (q[1]**2 + q[2]**2)
R = Rotations.orthonormal_mat(R)
return R
@staticmethod
def rot_mat_2_angle_axis(rot_mat):
theta = math.acos((np.trace(rot_mat) - 1) / 2)
return 1 / (2 * math.sin(theta)) * np.array([rot_mat[2,1] - rot_mat[1,2], rot_mat[0,2] - rot_mat[2,0], rot_mat[1,0] - rot_mat[0,1]]).reshape((3,1))
@staticmethod
def orthonormal_mat(mat):
# Perform SVD on rotation matrix to make the rows and columns orthonormal
U, _, V_transpose = np.linalg.svd(mat, full_matrices=True)
return np.matmul(U, V_transpose)
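# Hedged round-trip check: a 90-degree rotation about the z axis. With the
# [w, x, y, z] convention used above, the expected quaternion is roughly
# [0.7071, 0, 0, 0.7071], and converting back should recover the matrix.
if __name__ == "__main__":
    Rz90 = np.array([[0.0, -1.0, 0.0],
                     [1.0, 0.0, 0.0],
                     [0.0, 0.0, 1.0]])
    q = Rotations.rot_mat_2_quat(Rz90)
    print(q)                            # ~[0.7071, 0, 0, 0.7071]
    print(Rotations.quat_2_rot_mat(q))  # ~Rz90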
|
#Author: Diego Ramírez Barba
#Version: 1.0.0
#License: MIT
import random
from math import radians, sqrt, asin, sin, cos
#Create Data type
class Location:
    def __init__(self, latitude, longitude): #constructor
        self.latitude = latitude # 90 N to 90 S, 0 -> Equator
        self.longitude = longitude # 180 E to 180 W, 0 -> Greenwich Meridian
#Generate random coordinates
def generate_random_longitude_latitude():
latitude = ("{0:.4f}".format(random.uniform(-90, 90)))
longitude = ("{0:.4f}".format(random.uniform(-180, 180)))
data = Location(float(latitude),float(longitude))
return data
#Parse the coordinates to the format "25.344 N, 63.5532 W,"
def parse_coordinates(data):
if(data.latitude < 0):
data.latitude = str(abs(data.latitude)) + ' S,'
else:
data.latitude = str(data.latitude) + ' N,'
if(data.longitude < 0):
data.longitude = str(abs(data.longitude)) + ' W,'
else:
data.longitude = str(data.longitude) + ' E,'
print(data.latitude + ' ' + data.longitude)
return data
#Great circle distance
#This method converts the coordinates from decimal degrees to radians and returns the distance between A and B
#Haversine formula 2r *arcsin(sqrt(sin^2(diflat/2) + cos(lat1) * cos(lat2) * sin^2(diflong/2)))
def great_circle_distance(pointA, pointB):
R = 6371 # radius of earth in Km
x1 = pointA.latitude
y1 = pointA.longitude
x2 = pointB.latitude
y2 = pointB.longitude
parse_coordinates(pointA)
parse_coordinates(pointB)
y1, x1, y2, x2 = map(radians, [y1, x1, y2, x2])
dif_lat = x2 - x1
dif_lon = y2 - y1
x = sin(dif_lat/2)**2 + cos(x1) * cos(x2) * sin(dif_lon/2)**2
y = sqrt(x)
if(y > 1):
print("Error in the limits of the haversine formula") # The long haversine formula is needed
return -1
else:
z = asin(sqrt(x))
haversine = 2 * R * z
print(str(round(haversine,4)) + ' Km')
return haversine
pointA = generate_random_longitude_latitude()
pointB = generate_random_longitude_latitude()
great_circle_distance(pointA,pointB)
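#Worked check (hedged): two points on the equator 90 degrees of longitude apart
#span a quarter great circle, so the printed distance should be close to
#6371 * pi / 2, i.e. about 10007.5 Km
great_circle_distance(Location(0.0, 0.0), Location(0.0, 90.0))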
|
"""Client interface to the MarkerServer to send markers"""
import pylsl
import socket
import json
from logging import getLogger
from sys import platform
log = getLogger()
def sanitize_string(marker: str) -> str:
"""sanitize a string
removes whitespace, transforms to lower case and replaces umlaute and
spaces with safer symbols
args
----
marker:str
the un-sanitized string
returns
-------
sanitized:str
the sanitized string
"""
translation = str.maketrans({"ä": "ae", "ö": "oe", "ü": "ue", " ": "_"})
marker = marker.lower().strip().translate(translation)
if marker.lower() == "ping":
marker = json.dumps({"msg": marker})
log.critical(
f"'ping' is a reserved key-word for pinging the marker-server. If you want to ping, use available. Forwarded {marker} instead"
)
if marker.lower() == "poison-pill":
marker = {"msg": marker}
marker = json.dumps(marker)
log.critical(
f"'poison-pill' is a reserved key-word for shutting down the marker-server. If you want to shut-down, use kill. Forwarded {marker} instead"
)
return marker
def push(marker: str = "", tstamp: float = None, sanitize=True, port: int = 7654):
"""push a marker to the MarkerServer for redistribution as LSL
args
----
marker: str
an ascii-encodable string describing an event. We recommend
to use valid json-strings :func:`~.push_json`
tstamp: float
the timestamp of the event. We recommend to use timestamps received
from pylsl.local_clock
sanitize: bool
whether the string is to be sanitized, see :func:`~.sanitize_string`
port: int
the port of the MarkerServer
"""
if tstamp is None:
tstamp = pylsl.local_clock()
if sanitize:
marker = sanitize_string(marker)
c = _Client(port=port)
c.push(marker, tstamp)
def push_json(marker: dict = {"key": "value"}, tstamp: float = None):
"""encode a dictionary as json and push it to the MarkerServer
args
----
marker:
a json-encodable dictionary describing an event or current settings
tstamp:
the timestamp of the event. We recommend to use timestamps received
from pylsl.local_clock
.. caution:
the resulting msg will not be sanitized :func:`~.sanitize_string`
"""
push(json.dumps(marker), tstamp, sanitize=False)
def available(port: int = 7654, host: str = "127.0.0.1", verbose=True) -> bool:
"""test whether a markerserver is already available at port
args
----
host: str
the ip of the markerserver (defaults to localhost)
port: int
the port number of the markerserver (defaults to 7654)
returns
-------
status: bool
True if available, False if not
"""
c = _Client(host=host, port=port)
try:
c.push("ping", pylsl.local_clock())
return True
except ConnectionRefusedError as e:
if verbose:
print(e)
print(f"Markerserver at {host}:{port} is not available")
return False
def kill(host: str = "127.0.0.1", port: int = 7654):
c = _Client(port=port, host=host)
c.push("poison-pill", pylsl.local_clock())
class _Client:
"Basic Client communicating with the MarkerServer"
def __init__(self, host="127.0.0.1", port: int = 7654, verbose=True):
self.host = host
self.port = port
self.verbose = verbose
def push(self, marker: str = "", tstamp: float = None):
"connects, sends a message, and close the connection"
self.connect()
self.write(marker, tstamp)
self.close()
def connect(self):
"connect wth the remote server"
self.interface = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.interface.connect((self.host, self.port))
self.interface.settimeout(1)
def write(self, marker, tstamp):
"encode message into ascii and send all bytes"
msg = json.dumps((marker, tstamp)).encode("ascii")
if self.verbose:
print(f"Sending {marker} at {tstamp}")
self.interface.sendall(msg)
def close(self):
"closes the connection"
self.interface.shutdown(1)
self.interface.close()
if "darwin" in platform: # pragma no cover
def fake_push(
marker: str = "", tstamp: float = None, sanitize=True, port: int = 7654
):
if tstamp is None:
tstamp = pylsl.local_clock()
if sanitize:
marker = sanitize_string(marker)
print(f'MacOs: fake_push "{marker}" at {tstamp}')
fake_push.__doc__ = push.__doc__
push = fake_push
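# Hedged usage sketch: only push if a MarkerServer is reachable on the default
# port, so importing or running this file stays side-effect free otherwise.
# The marker payload is an arbitrary example.
if __name__ == "__main__":
    if available(port=7654, verbose=False):
        push_json({"event": "trial_start", "condition": "A"})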
|
#!/usr/bin/env python
# coding=utf-8
#
# Copyright © 2015-2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the X11 (MIT) (the “License”) set forth below;
#
# you may not use this file except in compliance with the License. Unless required by applicable law or agreed to in
# writing, software distributed under the License is distributed on an “AS IS” BASIS, without warranties or conditions
# of any kind, EITHER EXPRESS OR IMPLIED. See the License for the specific language governing permissions and
# limitations under the License. Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
#
# "THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.”
__author__ = 'yfauser'
class Error(Exception):
"""Base exception class"""
pass
class NsxError(Error):
"""
Exception raised if client is instantiated with fail_mode=raise
Attributes:
        status: The HTTP status code returned by the NSX API, e.g. 404 for 'not found'
msg: The body content returned by NSX in case of an error
"""
def __init__(self, status, msg):
self.status = status
self.msg = msg
def __str__(self):
return '\nstatus code: {}\nerror message: {}'.format(self.status, self.msg)
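# Hedged illustration: how NsxError formats when printed, using made-up
# status/message values.
if __name__ == "__main__":
    try:
        raise NsxError(404, "resource not found")
    except NsxError as error:
        print(error)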
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2020 Lucas Heitzmann Gabrielli.
# This file is part of gdstk, distributed under the terms of the
# Boost Software License - Version 1.0. See the accompanying
# LICENSE file or <http://www.boost.org/LICENSE_1_0.txt>
import pathlib
import numpy
import gdstk
from tutorial_images import draw
def init_image():
path = gdstk.FlexPath(
[(0, 5), (0, 0), (5, 0), (15, 10), (15, -5)],
[0.8, 0.8, 0.8, 0.8],
1.0,
joins=["natural", "bevel", "miter", "round"],
ends=["flush", "extended", (0.4, 0.8), "round"],
layer=[0, 1, 2, 3],
)
return gdstk.Cell("init").add(path)
def init0_image():
points = [(0, 8), (0, 0), (8, 0), (18, 13), (18, -8)]
path_1 = gdstk.FlexPath(points, 1, datatype=1)
path_2 = gdstk.FlexPath(points, 1, bend_radius=3)
return gdstk.Cell("init0").add(path_2, path_1)
def init1_image():
def custom_broken_join(p0, v0, p1, v1, center, width):
p0 = numpy.array(p0)
v0 = numpy.array(v0)
p1 = numpy.array(p1)
v1 = numpy.array(v1)
center = numpy.array(center)
# Calculate intersection point p between lines defined by
# p0 + u0 * v0 (for all u0) and p1 + u1 * v1 (for all u1)
den = v1[1] * v0[0] - v1[0] * v0[1]
lim = 1e-12 * (v0[0] ** 2 + v0[1] ** 2) * (v1[0] ** 2 + v1[1] ** 2)
if den ** 2 < lim:
# Lines are parallel: use mid-point
u0 = u1 = 0
p = 0.5 * (p0 + p1)
else:
dx = p1[0] - p0[0]
dy = p1[1] - p0[1]
u0 = (v1[1] * dx - v1[0] * dy) / den
u1 = (v0[1] * dx - v0[0] * dy) / den
p = 0.5 * (p0 + v0 * u0 + p1 + v1 * u1)
if u0 <= 0 and u1 >= 0:
# Inner corner
return [p]
# Outer corner
return [p0, center, p1]
def custom_pointy_end(p0, v0, p1, v1):
p0 = numpy.array(p0)
v0 = numpy.array(v0)
p1 = numpy.array(p1)
v1 = numpy.array(v1)
r = 0.5 * numpy.sqrt(numpy.sum((p0 - p1) ** 2))
v0 /= numpy.sqrt(numpy.sum(v0 ** 2))
v1 /= numpy.sqrt(numpy.sum(v1 ** 2))
return [p0, 0.5 * (p0 + p1) + 0.5 * (v0 - v1) * r, p1]
path = gdstk.FlexPath(
[(0, 5), (0, 0), (5, 0), (15, 10), (15, -5)],
3,
joins=custom_broken_join,
ends=custom_pointy_end,
)
return gdstk.Cell("init1").add(path)
def horizontal_image():
path = gdstk.FlexPath((0, 0), 0.2)
path.horizontal(2, width=0.4, relative=True)
path.horizontal(2, offset=[0.4], relative=True)
path.horizontal(2, relative=True)
assert (
numpy.max(
numpy.abs(
path.spine()
- numpy.array([[0.0, 0.0], [2.0, 0.0], [4.0, 0.0], [6.0, 0.0]])
)
)
== 0
)
return gdstk.Cell("horizontal").add(path)
def segment_image():
points = [(1, 0), (1, 1), (-1, 1), (-1, -1), (1, -1)]
path_1 = gdstk.FlexPath((0, 0), 0.2)
path_1.segment(points, 0.6)
path_2 = gdstk.FlexPath((3, 0), [0.1, 0.1], 0.2)
path_2.segment(points, offset=0.6, relative=True)
return gdstk.Cell("segment").add(path_1, path_2)
def cubic_image():
path = gdstk.FlexPath((0, 0), 0.2, tolerance=1e-3)
path.cubic([(0, 1), (1, 1), (1, 0)])
path.cubic([(1, -1), (2, -1), (2.5, -0.5), (3, 0), (3, 1), (2, 1)], width=0.5)
return gdstk.Cell("cubic").add(path)
def cubic_smooth_image():
path = gdstk.FlexPath((0, 0), 0.2, tolerance=1e-3)
path.cubic([(0, 1), (1, 1), (1, 0)])
path.cubic_smooth([(2, -1), (2.5, -0.5), (3, 1), (2, 1)], width=0.5)
return gdstk.Cell("cubic_smooth").add(path)
def bezier_image():
path = gdstk.FlexPath((0, 0), 0.2, tolerance=1e-3)
path.bezier([(4, 1), (4, 3), (0, 5), (-4, 3), (-4, -2), (0, -4)])
return gdstk.Cell("bezier").add(path)
def interpolation_image():
half_pi = numpy.pi / 2
points = [(4, 1), (4, 3), (0, 5), (-4, 3), (-4, -2), (0, -4)]
angles = [half_pi, None, None, None, -half_pi, -half_pi, None]
path_1 = gdstk.FlexPath((0, 0), 0.2, tolerance=1e-3)
path_1.interpolation(points, cycle=True)
path_2 = gdstk.FlexPath((6, -8), 0.2, tolerance=1e-3)
path_2.interpolation(points, angles, cycle=True, relative=True)
return gdstk.Cell("interpolation").add(path_1, path_2)
def arc_image():
path = gdstk.FlexPath((0, 0), [0.2, 0.3], 0.4, tolerance=1e-3)
path.vertical(5)
path.arc(2.5, numpy.pi, 0)
path.arc(5, -numpy.pi, -numpy.pi / 2)
return gdstk.Cell("arc").add(path)
def parametric_image():
def spiral(u):
rad = 2 * u ** 0.5
ang = 3 * numpy.pi * u
return (rad * numpy.cos(ang), rad * numpy.sin(ang))
path = gdstk.FlexPath((0, 0), 0.2, tolerance=1e-3)
path.parametric(spiral)
return gdstk.Cell("parametric").add(path)
def commands_image():
path = gdstk.FlexPath((0, 0), [0.2, 0.4, 0.2], 0.5, tolerance=1e-3)
path.commands(
"l",
3,
4,
"A",
2,
numpy.arctan2(3, -4),
numpy.pi / 2,
"h",
0.5,
"a",
3,
-numpy.pi,
)
return gdstk.Cell("commands").add(path)
if __name__ == "__main__":
path = pathlib.Path(__file__).parent.absolute() / "flexpath"
path.mkdir(parents=True, exist_ok=True)
draw(init_image(), path)
draw(init0_image(), path)
draw(init1_image(), path)
draw(horizontal_image(), path)
draw(segment_image(), path)
draw(cubic_image(), path)
draw(cubic_smooth_image(), path)
draw(bezier_image(), path)
draw(interpolation_image(), path)
draw(arc_image(), path)
draw(parametric_image(), path)
draw(commands_image(), path)
|
from django.middleware.csrf import rotate_token # _compare_salted_tokens,
from rest_framework.permissions import BasePermission
class HasCsrfTokenValid(BasePermission):
def has_permission(self, request, view):
token_valid = False
try:
csrf_token = request.headers.get("api-csrftoken")
csrf_cookie = request.META.get("CSRF_COOKIE")
"""
Check if both alphanumerics(strings) values are differents to prevent
a malicious user get the csrf cookie and send it from the ajax.
"""
if csrf_token == csrf_cookie:
rotate_token(request)
return False
token_valid = True #_compare_salted_tokens(csrf_token, csrf_cookie)
except ValueError: # if csrf_token & csrf_cookie are not a valid alphanumeric
return False
return token_valid
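# Usage sketch (illustrative, not part of this module): attach the permission to a
# DRF view so the "api-csrftoken" header is checked against the CSRF cookie.
#
#   from rest_framework.views import APIView
#
#   class ExampleView(APIView):
#       permission_classes = [HasCsrfTokenValid]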
|
from typing import Tuple
import torch
from torch import Tensor
import gym3
from gym3.types import Discrete, TensorType
UP = 0
DOWN = 1
LEFT = 2
RIGHT = 3
class GridWorld(gym3.Env):
"""Gridworld with no reward and no episode termination. Following gym3 interface"""
def __init__(self, n_env: int, n: int, device: torch.device):
ob_shape = (n_env, 2) # n x n gridworld where each row indicates agent's position
ac_shape = (n_env, )
ob_space = TensorType(Discrete(n), ob_shape)
ac_space = TensorType(Discrete(4), ac_shape)
super(GridWorld, self).__init__(ob_space, ac_space, n_env)
self.state = torch.ones(ob_shape, device=device, dtype=torch.long) * (n // 2)
self.reward = torch.zeros(ac_shape, device=device, dtype=torch.float)
self.first = torch.ones(ac_shape, device=device, dtype=torch.bool)
self.n = n
self.ob_shape = ob_shape
self.ac_shape = ac_shape
self.device = device
def act(self, ac: Tensor) -> None:
"""act one environment step
Args:
ac (Tensor): action of shape (n_env, ) and each value is 0~3
"""
next_state = self.get_next_state(self.state, ac)
reward = self.compute_reward(next_state)
first = self.is_done(next_state)
next_state = self.reset_episode(next_state, first)
self.state = next_state
self.reward = reward
self.first = first
def observe(self) -> Tuple[Tensor, Tensor, Tensor]:
return self.reward, self.state, self.first
def get_next_state(self, state: Tensor, ac: Tensor) -> Tensor:
up = -1 * (ac == UP)
down = ac == DOWN
left = -1 * (ac == LEFT)
right = ac == RIGHT
state[:, 0] = state[:, 0] + up + down
state[:, 1] = state[:, 1] + left + right
state = torch.clamp(state, 0, self.n-1)
return state
def compute_reward(self, state: Tensor) -> Tensor:
return torch.zeros(self.ac_shape, device=self.device)
def is_done(self, state: Tensor) -> Tensor:
return torch.zeros(self.ac_shape, device=self.device)
def reset_episode(self, next_state: Tensor, first: Tensor) -> Tensor:
"""reset episode if finished
Args:
next_state:
first:
Returns:
"""
return next_state
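if __name__ == "__main__":
    # Minimal usage sketch: step a 5x5 gridworld with 4 parallel envs using random
    # actions on CPU. The sizes and step count are arbitrary.
    device = torch.device("cpu")
    env = GridWorld(n_env=4, n=5, device=device)
    for _ in range(3):
        env.act(torch.randint(0, 4, (4,), device=device))
        reward, obs, first = env.observe()
        print(obs.tolist())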
|
import pytest
from datetime import datetime, timedelta
from time import sleep
from tpgnow.cache import Cache
from tpgnow.model import Stop
TIME_DELAY = 5
class TestCache(object):
@pytest.fixture
def cache(self):
return Cache()
@pytest.fixture
def key(self):
return "key"
@pytest.fixture
def value(self):
return "value"
def test_noWrite_read(self, cache, key):
assert cache.read(key) is None
def test_writeAndReadString(self, cache, key, value):
cache.write(key, value)
assert cache.read(key) is value
def test_writeAndReadObject(self, cache, key):
value = Stop("code", "name")
cache.write(key, value)
assert cache.read(key) is value
def test_cacheReadBeforeTimeoutDelay(self, cache, key, value):
        cache.write(key, value, delay=TIME_DELAY) # seconds
sleep(TIME_DELAY-2)
assert cache.read(key) is value
def test_cacheReadBeforeTimeoutDate(self, cache, key, value):
        timeoutDate = datetime.now() + timedelta(seconds=TIME_DELAY)
        cache.write(key, value, timeout=timeoutDate)
sleep(TIME_DELAY-2)
assert cache.read(key) is value
def test_cacheReadAfterTimeout(self, cache, key, value):
cache.write(key, value, delay=TIME_DELAY) # seconds
sleep(TIME_DELAY+2)
assert cache.read(key) is None
def test_cacheReadAfterTimeout_assertKeyDeleted(self, cache, key, value):
cache.write(key, value, delay=TIME_DELAY) # seconds
sleep(TIME_DELAY+2)
cache.read(key)
assert key not in cache.store
def test_noWriteHas(self, cache, key):
assert not cache.has(key)
def test_writeHas(self, cache, key, value):
cache.write(key, value)
assert cache.has(key)
def test_hasBeforeTimeoutDelay(self, cache, key, value):
        cache.write(key, value, delay=TIME_DELAY) # seconds
sleep(TIME_DELAY-2)
assert cache.has(key)
def test_hasBeforeTimeoutDate(self, cache, key, value):
        timeoutDate = datetime.now() + timedelta(seconds=TIME_DELAY)
        cache.write(key, value, timeout=timeoutDate)
sleep(TIME_DELAY-2)
assert cache.has(key)
def test_hasAfterTimeout(self, cache, key, value):
cache.write(key, value, delay=TIME_DELAY) # seconds
sleep(TIME_DELAY+2)
assert not cache.has(key)
    def test_hasAfterTimeout_assertKeyDeleted(self, cache, key, value):
cache.write(key, value, delay=TIME_DELAY) # seconds
sleep(TIME_DELAY+2)
cache.has(key)
assert key not in cache.store
def test_singleton(self):
cache1 = Cache.Instance()
cache2 = Cache.Instance()
assert cache1 is cache2
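# For reference, the Cache API exercised by these tests (sketch; values illustrative):
#   cache = Cache()
#   cache.write("key", "value", delay=TIME_DELAY)                               # expires after N seconds
#   cache.write("key", "value", timeout=datetime.now() + timedelta(seconds=5))  # expires at a datetime
#   cache.read("key")      # -> value, or None once expired (expired keys are removed from cache.store)
#   cache.has("key")       # -> bool, with the same eviction side effect
#   Cache.Instance()       # singleton accessor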
|
from optable.synthesis import manipulation_candidate
from optable.dataset import feature_types
from optable.manipulations.aggregations import aggregation
class MeanManipulation(aggregation.AggregationManipulation):
def __init__(self, path, dataset, col):
super(MeanManipulation, self).__init__(
path, dataset, col, "Mean",
"mean", "mean", False)
def calculate_priority(self):
return 0.2 + 0.2 * self.path.not_deeper_count \
+ 0.2 * self.path.substance_to_many_count(self.dataset, self.col) \
* self.path.to_many_path_priority(self.dataset, self.col)
def meta_feature_size():
return 3
def meta_feature(self):
to_many_meta = self.path.substance_to_many_count(
self.dataset, self.col) \
* self.path.to_many_path_priority(
self.dataset, self.col)
return [1, self.path.not_deeper_count, to_many_meta]
def meta_feature_name():
return [
"Mean-Constant",
"Mean-NotDeeperCount",
"Mean-ToManyMeta"
]
class MeanCandidate(manipulation_candidate.ManipulationCandidate):
def search(self, path, dataset):
if path.is_to_many:
dst_table = dataset.tables[path.dst]
ret = []
for col in dst_table.df.columns:
if path.is_substance_to_one_with_col(dataset, col):
continue
ftype = dst_table.ftypes[col]
if ftype == feature_types.numerical \
or ftype == feature_types.mc_processed_numerical \
or ftype == feature_types.c_processed_numerical \
or ftype == feature_types.t_processed_numerical:
ret.append(MeanManipulation(path, dataset, col))
return ret
else:
return []
|
# use virtual env from nb-requirements.txt
import kfp
from kfp import dsl
from kubernetes.client.models import V1EnvVar
from kfp.onprem import use_k8s_secret
from components.preprocess import preprocess
from components.train import train
import os
from pathlib import Path
OUTPUT_DIRECTORY = 'generated'
PROJECT_ROOT = Path(__file__).absolute().parent
web_downloader_op = kfp.components.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/master/components/contrib/web/Download/component.yaml')
preprocess_op = kfp.components.create_component_from_func(
func=preprocess,
output_component_file=os.path.join(PROJECT_ROOT, OUTPUT_DIRECTORY,
'preprocess-component.yaml'),
# This is optional. It saves the component spec for future use.
base_image='python:3.9',
packages_to_install=['pandas', 'pyarrow'])
training_op = kfp.components.create_component_from_func(
func=train,
output_component_file=os.path.join(PROJECT_ROOT, OUTPUT_DIRECTORY,
'train-component.yaml'),
# This is optional. It saves the component spec for future use.
base_image='python:3.9',
packages_to_install=['pandas', 'pyarrow', 'sklearn', 'mlflow', 'boto3'])
deploy_op = kfp.components.load_component_from_file(
os.path.join(PROJECT_ROOT, 'components', 'deploy', 'component.yaml'))
@dsl.pipeline(
name="e2e_wine_pipeline",
description="WINE pipeline",
)
def wine_pipeline(url):
web_downloader_task = web_downloader_op(url=url)
preprocess_task = preprocess_op(file=web_downloader_task.outputs['data'])
train_task = (training_op(file=preprocess_task.outputs['output'])
.add_env_variable(V1EnvVar(name='MLFLOW_TRACKING_URI',
value='http://mlflow-server.kubeflow.svc.cluster.local:5000'))
.add_env_variable(V1EnvVar(name='MLFLOW_S3_ENDPOINT_URL',
value='http://minio.kubeflow.svc.cluster.local:9000'))
# https://kubeflow-pipelines.readthedocs.io/en/stable/source/kfp.extensions.html#kfp.onprem.use_k8s_secret
.apply(use_k8s_secret(secret_name='mlpipeline-minio-artifact',
k8s_secret_key_to_env={
'accesskey': 'AWS_ACCESS_KEY_ID',
'secretkey': 'AWS_SECRET_ACCESS_KEY',
})))
deploy_task = deploy_op(model_uri=train_task.output)
if __name__ == '__main__':
pipeline_output = os.path.join(PROJECT_ROOT, OUTPUT_DIRECTORY,
'wine-pipeline.yaml')
kfp.compiler.Compiler().compile(wine_pipeline, pipeline_output)
print('Generated the wine pipeline definition')
# client = kfp.Client()
# client.create_run_from_pipeline_func(
# wine_pipeline,
# arguments={
# "url": "https://raw.githubusercontent.com/Barteus/kubeflow-examples/main/e2e-wine-kfp-mlflow/winequality-red.csv",
# })
|
from dataclasses import dataclass, field
from typing import Dict, List
from raytkUtil import ROPInfo, RaytkContext, Version, showMessageDialog
# noinspection PyUnreachableCode
if False:
# noinspection PyUnresolvedReferences
from _stubs import *
class Updater:
def __init__(self, ownerComp: 'COMP'):
self.ownerComp = ownerComp
# This MUST remain publicly available since OPs reference it for the `Updateop` par handler.
def UpdateOP(self, o: 'COMP'):
self._log(f'Updating {o}')
info = ROPInfo(o)
if not info:
self._showError(f'Unable to update {o}, it must be a ROP or RComp')
return
master = o.par.clone.eval()
if not master and o.par.clone.val.startswith('/raytk/'):
path = o.par.clone.val # type: str
if path.startswith('/raytk/'):
path = path.replace('/raytk/', '')
master = parent.raytk.op(path)
if not master:
self._showError(f'Unable to update {o}, no clone master found')
return
o.par.clone = master
self._log(f'Updating {o} using master {master}')
o.par.enablecloningpulse.pulse()
img = o.op('*Definition/opImage')
if img:
o.par.opviewer.val = img
o.viewer = True
o.par.clone = master.par.clone.val
def _showError(self, msg: str):
self._log(msg)
showMessageDialog(
title='Warning',
text=msg,
escOnClickAway=True,
)
def _log(self, msg: str):
print(self.ownerComp, msg)
ui.status = msg
class _Updater:
def __init__(self, ownerComp: 'COMP'):
self.ownerComp = ownerComp
self.mappings = [] # type: List[Mapping]
self.currentOpTypes = {} # type: Dict[str, COMP]
def loadSettings(self):
migrations = self.ownerComp.op('opMigrations')
self.mappings = [
Mapping(
str(migrations[i, 'fromOpType']),
int(migrations[i, 'fromOpVersion']),
Version(migrations[i, 'fromToolkitVersion']),
str(migrations[i, 'toOpType']),
int(migrations[i, 'toOpVersion']),
Version(migrations[i, 'toToolkitVersion']),
)
for i in range(1, migrations.numRows)
]
opTypes = self.ownerComp.op('opTable')
self.currentOpTypes = {
str(opTypes[i, 'opType']): op(opTypes[i, 'path'])
for i in range(1, opTypes.numRows)
}
@staticmethod
def getSelectedOps() -> 'List[COMP]':
return RaytkContext().currentROPs()
def runMigration(self, validate=True, perform=False):
self.loadSettings()
rops = self.getSelectedOps()
if not rops:
return
migration = Migration(
RaytkContext().toolkit(), rops,
validate=validate, perform=perform,
)
valid = True
if validate:
for o in rops:
if not self.processOp(o, migration, validate=True, update=False):
valid = False
if perform and (valid or not validate):
for o in rops:
self.processOp(o, migration, validate=False, update=True)
def processOp(self, o: 'OP', migration: 'Migration', validate: bool, update: bool) -> bool:
ropInfo = ROPInfo(o)
t = ropInfo.opType
v = int(ropInfo.opVersion)
master = self.currentOpTypes.get(t)
pass
@dataclass
class Mapping:
fromOpType: str
fromOpVersion: int
fromToolkitVersion: Version
toOpType: str
toOpVersion: int
toToolkitVersion: Version
@dataclass
class Migration:
toolkit: 'COMP'
rops: 'List[COMP]' = field(default_factory=list)
errors: 'List[str]' = field(default_factory=list)
warnings: 'List[str]' = field(default_factory=list)
validate: bool = True
perform: bool = False
class MigrationHandler:
def check(self, ropInfo: ROPInfo, migration: 'Migration') -> bool:
pass
def update(self, ropInfo: ROPInfo, migration: 'Migration'):
pass
|
from plark_game.classes.pantherAgent import Panther_Agent
class Panther_Agent_Move_North(Panther_Agent):
def __init__(self):
pass
def getAction(self, state):
return self.action_lookup(0)
|
import pandas as pd
import math
from reportlab.lib.colors import black, white, Color, yellow
from reportlab.lib.styles import ParagraphStyle
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfgen.canvas import Canvas
from reportlab.lib.units import inch, cm
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus import Paragraph
FONT_NAME = "UnifrakturMaguntia"
pdfmetrics.registerFont(TTFont(FONT_NAME, 'UnifrakturMaguntia.ttf'))
tarot_cards = pd.read_csv("tarot.csv")
# WIDTH, HEIGHT = (4.711 * inch, 6.282 * inch) #ipad mini
WIDTH, HEIGHT = (2.539 * inch, 5.496 * inch) #iphone
CORNER_SQUARE_SIDE = 0.4 * inch
CORNER_BORDER = 0.1 * inch
F_COLOR, B_COLOR, D_COLOR, FADE_COLOR, HALF_FADE_COLOR = (white, black, Color(0.13, 0, 0), Color(0.3, 0.33, 0.33), Color(0.4, 0.43, 0.43))
c = Canvas("TAROT.pdf", pagesize=(WIDTH, HEIGHT))
def draw_corner(c, x1, y1, x2, y2, s, bottom_corner_flip=False):
c.saveState()
c.setFont("Helvetica", 10)
c.setStrokeColor(FADE_COLOR)
c.setFillColor(D_COLOR)
c.setLineWidth(0.01)
c.rect(x1, y1, CORNER_SQUARE_SIDE, CORNER_SQUARE_SIDE, fill=1, stroke=1)
# c.line(x1, y1, x2, y2)
# c.line(x2, y1, x1, y2)
if isinstance(s, str):
c.setFillColor(F_COLOR)
c.setStrokeColor(F_COLOR)
c.setLineWidth(3)
x_center = x1 + (x2 - x1) / 2.0
y_center = y1 + (y2 - y1) / 2.0
x_1st_4th = x1 + (x2 - x1) / 4.0
x_2nd_4th = x1 + (x2 - x1) / 4.0 * 2.0
x_3rd_4th = x1 + (x2 - x1) / 4.0 * 3.0
y1_border = y1 + (y2 - y1) / 6.0
y2_border = y2 - (y2 - y1) / 6.0
if s == "X":
c.setStrokeColor(HALF_FADE_COLOR)
c.line(x_1st_4th, y1_border, x_3rd_4th, y2_border)
c.line(x_3rd_4th, y1_border, x_1st_4th, y2_border)
if s == "V":
c.setStrokeColor(HALF_FADE_COLOR)
c.line(x_1st_4th, y2_border, x_center, y1_border)
c.line(x_center, y1_border, x_3rd_4th, y2_border)
c.line(x_center, y1_border-CORNER_SQUARE_SIDE*0.03, x_center, y1_border+CORNER_SQUARE_SIDE*0.03)
if s == "\\":
c.setStrokeColor(FADE_COLOR)
c.line(x_1st_4th, y2_border, x_3rd_4th, y1_border)
if s == "/":
c.setStrokeColor(FADE_COLOR)
c.line(x_1st_4th, y1_border, x_3rd_4th, y2_border)
if s == "I":
c.setStrokeColor(HALF_FADE_COLOR)
c.line(x_center, y1_border, x_center, y2_border)
if s == "II":
c.setStrokeColor(HALF_FADE_COLOR)
x_1st_3rd = x1 + (x2-x1)/3.0
x_2nd_3rd = x1 + (x2-x1)/3.0*2.0
c.line(x_1st_3rd, y1_border, x_1st_3rd, y2_border)
c.line(x_2nd_3rd, y1_border, x_2nd_3rd, y2_border)
if s == "III":
c.setStrokeColor(HALF_FADE_COLOR)
c.line(x_1st_4th, y1_border, x_1st_4th, y2_border)
c.line(x_2nd_4th, y1_border, x_2nd_4th, y2_border)
c.line(x_3rd_4th, y1_border, x_3rd_4th, y2_border)
if s =="🞡":
c.setStrokeColor(FADE_COLOR)
if bottom_corner_flip:
c.transform(1, 0, 0, -1, 0, y1 + CORNER_SQUARE_SIDE + CORNER_BORDER)
c.line(x_center, y1_border, x_center, y2_border)
y_sword = y1_border + (x_3rd_4th - x_1st_4th)/2.0
c.line(x_1st_4th, y_sword, x_3rd_4th, y_sword)
if s == "O":
c.setStrokeColor(FADE_COLOR)
if bottom_corner_flip:
c.transform(1, 0, 0, -1, 0, y1 + CORNER_SQUARE_SIDE + CORNER_BORDER)
c.setLineWidth(2.2)
r = CORNER_SQUARE_SIDE * 0.38
c.circle(x_center,y_center, r, fill=0)
c.setLineWidth(1.2)
pentagon = []
for n in range(0, 5):
x = x_center + r * math.cos(math.radians(90 + n * 72))
y = y_center + r * math.sin(math.radians(90 + n * 72))
pentagon.append([x, y])
c.line(pentagon[0][0], pentagon[0][1], pentagon[2][0], pentagon[2][1])
c.line(pentagon[2][0], pentagon[2][1], pentagon[4][0], pentagon[4][1])
c.line(pentagon[4][0], pentagon[4][1], pentagon[1][0], pentagon[1][1])
c.line(pentagon[1][0], pentagon[1][1], pentagon[3][0], pentagon[3][1])
c.line(pentagon[3][0], pentagon[3][1], pentagon[0][0], pentagon[0][1])
if s == "U":
c.setStrokeColor(FADE_COLOR)
if bottom_corner_flip:
c.transform(1, 0, 0, -1, 0, y1 + CORNER_SQUARE_SIDE + CORNER_BORDER)
c.setLineWidth(2)
y_cup_bottom = y_center-CORNER_SQUARE_SIDE * 0.09
c.arc(x_1st_4th, y_cup_bottom, x_3rd_4th, y2+CORNER_SQUARE_SIDE * (1.0/3.0), 180, 180)
# c.line(x_1st_4th, y2_border, x_1st_4th, y_center)
c.line(x_center, y_cup_bottom, x_center, y1_border)
c.line(x_1st_4th, y1_border, x_3rd_4th, y1_border)
c.restoreState()
for i, upper_left, upper_right, bottom_left, bottom_right, card_name in \
zip(
tarot_cards.index,
tarot_cards["a"],
tarot_cards["b"],
tarot_cards["c"],
tarot_cards["d"],
tarot_cards["e"]
):
# card_name = card_name.upper()
print(f"{upper_left} {card_name} {upper_right}\n{bottom_left} {bottom_right}\n \n")
c.setFillColor(B_COLOR)
c.rect(0, 0, WIDTH, HEIGHT, fill=1)
from PIL import Image
im_path = f"my_images_dark/{i}.png"
im = Image.open(im_path)
img_width, img_height = im.size
scaled_img_width = (img_width/img_height) * HEIGHT
c.drawImage(im_path, WIDTH/2.0 - scaled_img_width/2.0, 0, height=HEIGHT, width=scaled_img_width, preserveAspectRatio=True)
c.setFillColor(F_COLOR)
FONT_NAME = "UnifrakturMaguntia"
FONT_SIZE = 17
c.setFont(FONT_NAME, FONT_SIZE)
style_card_name = ParagraphStyle('card-name', fontName=FONT_NAME, fontSize=FONT_SIZE, textColor=F_COLOR)
p = Paragraph(card_name, style=style_card_name)
text_width, text_height = p.wrapOn(c, 111111111, 11111111)
# p.drawOn(c, WIDTH - text_width/2.0, HEIGHT - CORNER_SQUARE_SIDE/2.0 - CORNER_BORDER - text_height/2.0)
c.setFillColor(FADE_COLOR)
c.drawCentredString(WIDTH/2.0, CORNER_SQUARE_SIDE/2.0 + CORNER_BORDER - text_height/2.0, card_name)
draw_corner(c, CORNER_BORDER, CORNER_BORDER, CORNER_SQUARE_SIDE+CORNER_BORDER, CORNER_SQUARE_SIDE+CORNER_BORDER, bottom_left)
draw_corner(c, WIDTH-CORNER_SQUARE_SIDE-CORNER_BORDER, CORNER_BORDER, WIDTH-CORNER_BORDER, CORNER_SQUARE_SIDE+CORNER_BORDER, bottom_right, True)
draw_corner(c, CORNER_BORDER, HEIGHT-CORNER_SQUARE_SIDE-CORNER_BORDER, CORNER_SQUARE_SIDE+CORNER_BORDER, HEIGHT-CORNER_BORDER, upper_left)
draw_corner(c, WIDTH-CORNER_SQUARE_SIDE-CORNER_BORDER, HEIGHT-CORNER_SQUARE_SIDE-CORNER_BORDER, WIDTH-CORNER_BORDER, HEIGHT-CORNER_BORDER, upper_right)
c.showPage()
c.save()
|
"""Liquid template definition."""
from __future__ import annotations
from collections import abc
from io import StringIO
from pathlib import Path
from typing import Awaitable
from typing import Dict
from typing import Any
from typing import Iterator
from typing import Mapping
from typing import Optional
from typing import TextIO
from typing import Union
from typing import TYPE_CHECKING
from liquid.context import Context
from liquid.context import ReadOnlyChainMap
from liquid.exceptions import LiquidInterrupt
from liquid.exceptions import LiquidSyntaxError
from liquid.exceptions import Error
if TYPE_CHECKING: # pragma: no cover
from liquid import Environment
from liquid.ast import ParseTree
from liquid.loaders import UpToDate
class BoundTemplate:
"""A liquid template that has been parsed and is bound to a
:class:`liquid.Environment`.
You probably don't want to instantiate :class:`BoundTemplate` directly. Use
:meth:`liquid.Environment.from_string` or :meth:`liquid.Environment.get_template`
instead.
:param env: The environment this template is bound to.
:type env: liquid.Environment
:param parse_tree: The parse tree representing this template.
:type parse_tree: liquid.ast.ParseTree
:param name: Optional name of the template. Defaults to an empty string.
:type name: Optional[str]
:param path: Optional origin path or identifier for the template.
:type path: Optional[Union[str, Path]]
:param globals: An optional mapping of context variables made available every
time the resulting template is rendered. Defaults to ``None``.
:type globals: Optional[Dict[str, object]]
:param matter: Optional mapping containing variables associated with the template.
Could be "front matter" or other meta data.
:type matter: Optional[Mapping[str, object]]
:param uptodate: Optional callable that will return ``True`` if the template is up
to date, or ``False`` if it needs to be reloaded. Defaults to ``None``.
:type uptodate: Optional[Callable[[], bool]]
"""
# pylint: disable=redefined-builtin, too-many-arguments
def __init__(
self,
env: Environment,
parse_tree: ParseTree,
name: str = "",
path: Optional[Union[str, Path]] = None,
globals: Optional[Dict[str, object]] = None,
matter: Optional[Mapping[str, object]] = None,
uptodate: UpToDate = None,
):
self.env = env
self.tree = parse_tree
self.globals = globals or {}
self.matter = matter or {}
self.name = name
self.path = path
self.uptodate = uptodate
def render(self, *args: Any, **kwargs: Any) -> str:
"""Render the template with `args` and `kwargs` included in the render context.
Accepts the same arguments as the :class:`dict` constructor.
"""
context = Context(self.env, globals=self.make_globals(dict(*args, **kwargs)))
buf = StringIO()
self.render_with_context(context, buf)
return buf.getvalue()
async def render_async(self, *args: Any, **kwargs: Any) -> str:
"""An async version of :meth:`liquid.template.BoundTemplate.render`."""
context = Context(self.env, globals=self.make_globals(dict(*args, **kwargs)))
buf = StringIO()
await self.render_with_context_async(context, buf)
return buf.getvalue()
def render_with_context(
self,
context: Context,
buffer: TextIO,
*args: Any,
partial: bool = False,
block_scope: bool = False,
**kwargs: Any,
) -> None:
"""Render the template using an existing context and output buffer.
``args`` and ``kwargs`` are passed to the :class:`dict` constructor. The
resulting dictionary is added to the render context.
:param context: A render context.
:param buffer: File-like object to which rendered text is written.
:param partial: If `True`, indicates that the current template has been
included using either a "render" or "include" tag. Defaults to ``False``.
:param block_scope: If `True`, indicates that assigns, breaks and continues
from this template will not leak into the parent context. Defaults to
``False``.
"""
# "template" could get overridden from args/kwargs, "partial" will not.
namespace = self.make_partial_namespace(partial, dict(*args, **kwargs))
with context.extend(namespace=namespace):
for node in self.tree.statements:
try:
node.render(context, buffer)
except LiquidInterrupt as err:
# If this is an "included" template, there could be a for loop
# in a parent template. A for loop that could be interrupted
# from an included template.
#
# Convert the interrupt to a syntax error if there is no parent.
if not partial or block_scope:
self.env.error(
LiquidSyntaxError(
f"unexpected '{err}'", linenum=node.token().linenum
)
)
else:
raise
except Error as err:
# Raise or warn according to the current mode.
self.env.error(err, linenum=node.token().linenum)
async def render_with_context_async(
self,
context: Context,
buffer: TextIO,
*args: Any,
partial: bool = False,
block_scope: bool = False,
**kwargs: Any,
) -> None:
"""An async version of `render_with_context`."""
# "template" could get overridden from args/kwargs, "partial" will not.
namespace = self.make_partial_namespace(partial, dict(*args, **kwargs))
with context.extend(namespace=namespace):
for node in self.tree.statements:
try:
await node.render_async(context, buffer)
except LiquidInterrupt as err:
# If this is an "included" template, there could be a for loop
# in a parent template. A for loop that could be interrupted
# from an included template.
#
# Convert the interrupt to a syntax error if there is no parent.
if not partial or block_scope:
self.env.error(
LiquidSyntaxError(
f"unexpected '{err}'", linenum=node.token().linenum
)
)
else:
raise
except Error as err:
# Raise or warn according to the current mode.
self.env.error(err, linenum=node.token().linenum)
@property
def is_up_to_date(self) -> bool:
"""False if the template was modified since it was last parsed,
True otherwise."""
if not self.uptodate:
return True
uptodate = self.uptodate()
if not isinstance(uptodate, bool):
raise Error(
f"expected a boolean from uptodate, found {type(uptodate).__name__}"
)
return uptodate
async def is_up_to_date_async(self) -> bool:
"""An async version of the ``is_up_to_date`` property.
        If ``template.uptodate`` is a coroutine, it will be awaited. Otherwise it will
        be called just like ``is_up_to_date``.
"""
if not self.uptodate:
return True
uptodate = self.uptodate()
if isinstance(uptodate, Awaitable):
return await uptodate
return uptodate
def make_globals(self, render_args: Mapping[str, object]) -> Mapping[str, object]:
"""Return a mapping aggregated from render arguments, template globals and
matter variables."""
return ReadOnlyChainMap(
render_args,
self.matter,
self.globals,
)
# pylint: disable=no-self-use
def make_partial_namespace(
self,
partial: bool,
render_args: Mapping[str, object],
) -> Mapping[str, object]:
"""Return a namespace dictionary. This is used by `render_with_context` to
extend an existing context."""
return {**render_args, "partial": partial}
def __repr__(self) -> str:
return (
f"Template(name='{self.name}', "
f"path='{self.path}', uptodate={self.is_up_to_date})"
) # pragma: no cover
class AwareBoundTemplate(BoundTemplate):
"""A `BoundTemplate` subclass that automatically includes a `TemplateDrop` in the
global namespace."""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.drop = TemplateDrop(self.name, self.path)
def make_partial_namespace(
self,
partial: bool,
render_args: Mapping[str, object],
) -> abc.Mapping[str, object]:
return {
"template": self.drop,
**super().make_partial_namespace(partial, render_args),
}
class TemplateDrop(Mapping[str, Optional[str]]):
"""Template meta data mapping."""
def __init__(self, name: str, path: Optional[Union[str, Path]]):
self.name = name
self.path = path
if not self.path or isinstance(self.path, str):
self.path = Path(self.name)
self.stem = self.path.stem
self.suffix: Optional[str] = None
if "." in self.stem:
self.suffix = self.stem.split(".")[-1]
self._items = {
"directory": self.path.parent.name,
"name": self.path.name.split(".")[0],
"suffix": self.suffix,
}
def __str__(self) -> str:
return self.stem
def __repr__(self) -> str:
return (
f"TemplateDrop(directory='{self['directory']}', "
f"name='{self['name']}', suffix='{self['suffix']}')"
) # pragma: no cover
def __contains__(self, item: object) -> bool:
return item in self._items
def __getitem__(self, key: object) -> Optional[str]:
return self._items[str(key)]
def __len__(self) -> int:
return len(self._items)
def __iter__(self) -> Iterator[str]:
return iter(self._items)
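# Usage sketch (illustrative): BoundTemplate instances are normally created through an
# Environment rather than directly, as noted in the class docstring above.
#
#   from liquid import Environment
#   template = Environment().from_string("Hello, {{ you }}!")
#   print(template.render(you="World"))  # -> Hello, World!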
|
from muwgs.application.crosswords import crosswords
from sys import path
path.append('..')
print(crosswords('b*eed'))
|
#!/usr/bin/env python3
##########
#
# Generate HTML table
#
##########
import lib_result_report as librr
import csv
import glob
import math
import numpy as np
import os.path
import shutil
import sys
from collections import namedtuple
from PIL import Image
from enum import Enum
input_path_postfix = '/output/'
output_path_root = '/home/mhsung/app/cuboid-prediction/output/'
output_dir_prefix = ''
gt_n = [1, 0, 0]
gt_t = 0
def load_instances(input_filepath, output_filepath, symmetry_part_index):
output_file_prefix = os.path.basename(os.path.normpath(input_filepath + '/../'))
dirnames = glob.glob(input_filepath + '/*')
print(output_file_prefix + '_ours.csv')
print(output_file_prefix + '_podolak.csv')
ours_file = open(output_file_prefix + '_ours.csv', 'w')
podolak_file = open(output_file_prefix + '_podolak.csv', 'w')
for dirname in dirnames:
if not os.path.isdir(dirname):
continue
prefix = os.path.basename(dirname)
        print(prefix)
is_loaded = True
candidate_index = librr.find_best_candidate(dirname, prefix)
print('Candidate index: ' + str(candidate_index))
relative_image_filepath = []
image_filenames = []
image_filenames.append(prefix + '_view.png')
image_filenames.append(prefix + '_input.png')
image_filenames.append(prefix + '_' + str(candidate_index) + '.png')
image_filenames.append(prefix + '_' + str(candidate_index) + '_symmetry_accuracy.png')
image_filenames.append(prefix + '_' + str(candidate_index) + '_symmetry_completeness.png')
image_filenames.append(prefix + '_' + str(candidate_index) + '_database_accuracy.png')
image_filenames.append(prefix + '_' + str(candidate_index) + '_database_completeness.png')
image_filenames.append(prefix + '_' + str(candidate_index) + '_fusion_accuracy.png')
image_filenames.append(prefix + '_' + str(candidate_index) + '_fusion_completeness.png')
for image_filename in image_filenames:
if not os.path.exists(dirname + '/' + image_filename):
                print('Warning: File does not exist: "' + (dirname + '/' + image_filename) + '"')
is_loaded = False
break
# Get relative file path.
relative_image_filepath.append('./' + image_filename)
if not is_loaded:
continue
ours_symm_filename_postfix = '_' + str(candidate_index) + '_symmetry_info.txt'
ours_symm_filename = dirname + '/' + prefix + ours_symm_filename_postfix
if os.path.exists(ours_symm_filename):
with open(ours_symm_filename, 'r') as csv_file:
data = csv.reader(csv_file, delimiter=',')
                x_data = next(data)
n = np.array([float(x_data[1]), float(x_data[2]), float(x_data[3])])
t = float(x_data[4])
n = n / np.linalg.norm(n)
ours_file.write(str(n[0]) + ',' + str(n[1]) + ',' + str(n[2]) + ',' + str(t) + '\n')
podolak_symm_filename_postfix = '_symmetry_info.txt'
podolak_symm_filename = dirname + '/../../symmetry_detection/' + prefix + '/' + prefix + podolak_symm_filename_postfix
print(podolak_symm_filename)
if os.path.exists(podolak_symm_filename):
with open(podolak_symm_filename, 'r') as csv_file:
data = csv.reader(csv_file, delimiter=',')
                x_data = next(data)
n = np.array([float(x_data[1]), float(x_data[2]), float(x_data[3])])
t = float(x_data[4])
n = n / np.linalg.norm(n)
podolak_file.write(str(n[0]) + ',' + str(n[1]) + ',' + str(n[2]) + ',' + str(t) + '\n')
ours_file.close()
podolak_file.close()
def main():
input_path, output_path, dataset_name, symmetry_part_names = librr.parse_arguments(
input_path_postfix, output_path_root, output_dir_prefix)
load_instances(input_path, output_path, -1)
main()
|
from datetime import datetime
from app import db
from sqlalchemy.schema import Sequence
# id_seq is required for auto id generation(cx_Oracle)
id_seq = Sequence('id_seq')
ingredientList = db.Table('Ingredient_List',
db.Column('Ingredient_name', db.String(255), db.ForeignKey('INGREDIENT.name')),
db.Column('Recipe_id', db.Integer(), db.ForeignKey('RECIPE.id'))
)
categoryList = db.Table('Category_List',
db.Column('Category_name', db.String(255), db.ForeignKey('CATEGORY.name')),
db.Column('Recipe_id', db.Integer(), db.ForeignKey('RECIPE.id'))
)
class User(db.Model):
__tablename__ = 'USER'
id = db.Column(db.Integer(), id_seq,
server_default=id_seq.next_value(), primary_key=True)
email = db.Column(db.String(255))
username = db.Column(db.String(255), unique=True)
password = db.Column(db.String(255))
fname = db.Column(db.String(255))
mname = db.Column(db.String(255))
lname = db.Column(db.String(255))
registerdate = db.Column(db.DateTime())
menus = db.relationship('Menu', backref='USER', lazy=True)
class Recipe(db.Model):
__tablename__ = 'RECIPE'
id = db.Column(db.Integer(), primary_key=True)
direction = db.Column(db.String(255))
fat = db.Column(db.Integer())
date = db.Column(db.DateTime(timezone=True))
calories = db.Column(db.Integer())
description = db.Column(db.String(255))
protein = db.Column(db.Integer())
rating = db.Column(db.Integer(),nullable=False)
title = db.Column(db.String(255))
ingredientList = db.relationship('Ingredient', secondary=ingredientList, lazy='subquery',cascade="save-update, merge, delete",
backref=db.backref('Recipes', lazy=True))
ingredientDescription = db.Column(db.String(255))
sodium = db.Column(db.Integer())
categoryList = db.relationship('Category', secondary=categoryList, lazy='subquery',cascade="save-update, merge, delete",
backref=db.backref('Categories', lazy=True))
def to_json(self,ingdic:dict, catdic:dict):
return {
"direction": self.direction,
"id": int(self.id),
"date": self.date.strftime("%Y%m%d"),
"fat": int(self.fat),
"calories": int(self.calories),
"description": self.description,
"protein": int(self.protein),
"rating": int(self.rating),
"title": self.title,
"ingredientList": [i.get("Ingredient_name") for i in ingdic],
"ingredientDescription": self.ingredientDescription,
"sodium": int(self.sodium),
"categoryList": [i.get("Category_name") for i in catdic]
}
class Menu(db.Model):
__tablename__ = 'MENU'
id = db.Column(db.Integer(), id_seq,
server_default=id_seq.next_value(), primary_key=True)
userId = db.Column(db.Integer(), db.ForeignKey('USER.id'), nullable=False)
recipeList = db.Column(db.String(255))
name = db.Column(db.String(255))
class Category(db.Model):
__tablename__ = 'CATEGORY'
name = db.Column(db.String(255), primary_key=True)
class Ingredient(db.Model):
__tablename__ = 'INGREDIENT'
name = db.Column(db.String(255), primary_key=True)
class MenuRate(db.Model):
__tablename__ = 'MENURATE'
id = db.Column(db.Integer(), id_seq,
server_default=id_seq.next_value(), primary_key=True)
menuId = db.Column(db.Integer(), db.ForeignKey('MENU.id'), nullable=False)
userId = db.Column(db.Integer(), db.ForeignKey('USER.id'), nullable=False)
rate = db.Column(db.Integer())
class Review(db.Model):
__tablename__ = "REVIEW"
id = db.Column(db.Integer(), primary_key=True)
username = db.Column(db.String(255), db.ForeignKey('USER.username'), nullable=False)
rating = db.Column(db.Float())
comments = db.Column(db.String(255))
recipeid = db.Column(db.Integer(), db.ForeignKey('RECIPE.id'), nullable=False)
|
import numpy as np
import os
try:
import pymatgen as pm
from pymatgen.io.cif import CifParser
from pymatgen.io.vasp.inputs import Kpoints
has_pm = True
except:
has_pm = False
def get_kpts(screener,cif_file,level):
"""
Obtain the number of kpoints
Args:
screener (class): pymofscreen.screener class
cif_file (string): name of CIF file
level (string): accuracy level
Returns:
kpts (list of ints): kpoint grid
gamma (bool): True for gamma-centered
"""
niggli = screener.niggli
mofpath = screener.mofpath
kpts_path = screener.kpts_path
kppas = screener.kppas
kpts = None
if not mofpath:
mofpath = ''
if kpts_path == 'Auto' and has_pm:
if level == 'low':
kppa = kppas[0]
elif level == 'high':
kppa = kppas[1]
else:
raise ValueError('kpoints accuracy level not defined')
filepath = os.path.join(mofpath,cif_file)
if '.cif' in cif_file:
parser = CifParser(filepath)
pm_mof = parser.get_structures(primitive=niggli)[0]
else:
pm_mof = pm.Structure.from_file(filepath,primitive=niggli)
pm_kpts = Kpoints.automatic_density(pm_mof,kppa)
kpts = pm_kpts.kpts[0]
if pm_kpts.style.name == 'Gamma':
gamma = True
else:
gamma = None
elif kpts_path == 'Auto' and not has_pm:
raise ValueError('Pymatgen not installed. Please provide a kpts file.')
else:
old_cif_name = cif_file.split('.cif')[0].split('_')[0]
infile = open(kpts_path,'r')
lines = infile.read().splitlines()
infile.close()
for i in range(len(lines)):
if old_cif_name in lines[i]:
if level == 'low':
kpts = lines[i+1]
gamma = lines[i+2]
elif level == 'high':
kpts = lines[i+3]
gamma = lines[i+4]
else:
raise ValueError('Incompatible KPPA with prior runs')
break
kpts = np.squeeze(np.asarray(np.matrix(kpts))).tolist()
if not kpts or len(kpts) != 3:
raise ValueError('Error parsing k-points for '+cif_file)
if gamma == 'True':
gamma = True
elif gamma == 'False':
gamma = False
else:
raise ValueError('Error parsing gamma for '+cif_file)
return kpts, gamma
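if __name__ == '__main__':
    # Illustrative sketch only: a minimal stand-in for the pymofscreen screener object,
    # showing the attributes get_kpts reads. The path, CIF name and KPPA values are made up.
    from types import SimpleNamespace
    fake_screener = SimpleNamespace(
        niggli=True,
        mofpath='/path/to/cifs',
        kpts_path='Auto',
        kppas=[100, 1000],
    )
    kpts, gamma = get_kpts(fake_screener, 'example_mof.cif', 'low')
    print(kpts, gamma)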
|
import pandas as pd
import requests
from tqdm import tqdm
import os
from os import listdir
from os.path import isfile, join
from datetime import datetime
from functools import cache
"""
Test which of the refset members + their descendants are present in the VT (full set and the gynaecology list).
146481000146103 |simpele referentieset met obstetrische verrichtingen (metadata)|
Place the VT Excel release file in ./resources
1) Builds a list of a SNOMED refset and the descendants of those refset members.
2) Reads in a release file of the Verrichtingenthesaurus (VT).
- Compares each row from 2) with 1). Shows True/False in column D of the output.xlsx file.
Run with python3 refset+descendants_vs_vt.py. Pick the right Excel file in the dialog and download output.xlsx.
"""
### Config ###
# Snowstorm URL - include trailing forward slash
snowstorm_url = "https://snowstorm.test-nictiz.nl/"
snomed_branch = 'MAIN/SNOMEDCT-NL'
snomed_versie = 'live-20210331'
# Create the VT dataframes
files_in_folder = [f for f in listdir("./resources") if isfile(join("./resources", f))]
i=0
print("Bestanden in map:")
print("-"*80)
file_1 = False
file_2 = False
file_3 = False
for file in files_in_folder:
file_type = file.split("_")[-1:]
if file_type[0] == "ThesaurusConceptRol.csv":
thesaurusConceptRollen = pd.read_csv("./resources/"+file)
file_1 = file
if file_type[0] == "ThesaurusConcept.csv":
thesaurusConcepten = pd.read_csv("./resources/"+file)
file_2 = file
if file_type[0] == "ThesaurusTerm.csv":
thesaurusTermen = pd.read_csv("./resources/"+file)
file_3 = file
if file_1 and file_2 and file_3:
print("Bronbestanden gevonden.")
else:
exit("Niet alle bronbestanden aanwezig.")
print("-"*80)
print("-"*80)
print(file_1)
print(thesaurusConceptRollen.head())
print("-"*80)
print(file_2)
print(thesaurusConcepten.head())
print("-"*80)
print(file_3)
print(thesaurusTermen.head())
print("-"*80)
print("-"*80)
# Fetch terms
@cache
def fetchTerms(conceptid):
url = f"{snowstorm_url}{snomed_branch}/concepts/{conceptid}/"
req = requests.get(url)
response = req.json()
if req.status_code == 200:
return response
else:
return {}
# Fetch refset members
@cache
def fetchEcl(ecl):
concepts = []
url = f"{snowstorm_url}{snomed_branch}/concepts?ecl={ecl}&limit=10000&returnIdOnly=true"
# print(url)
req = requests.get(url)
response = req.json()
total = response.get('total',0)
while len(concepts) < total:
concepts += response.get('items',[])
url = f"{snowstorm_url}{snomed_branch}/concepts?ecl={ecl}&limit=10000&searchAfter={response.get('searchAfter')}&returnIdOnly=true"
# print(url)
req = requests.get(url)
response = req.json()
return concepts
conceptID_list = fetchEcl("^146481000146103")
print(f"{len(conceptID_list)} refsetleden opgehaald. Nu de descendants.")
# Fetch the descendants of the refset members and add them to the list
deduplicated_list_ecl = conceptID_list.copy()
deduplicated_list_descendants = []
for concept in tqdm(deduplicated_list_ecl):
deduplicated_list_descendants += fetchEcl(f"<{concept}")
# Deduplicate the lists
deduplicated_list_ecl = list(set(deduplicated_list_ecl))
print(len(deduplicated_list_ecl), "concepten in refset.")
deduplicated_list_descendants = list(set(deduplicated_list_descendants))
print(len(deduplicated_list_descendants), "concepten in descendants.")
deduplicated_list_total = deduplicated_list_ecl + deduplicated_list_descendants
print(len(deduplicated_list_total), "concepten in totaal.")
print("-"*80)
# Create the list of thesaurus concept IDs after filtering
thesaurusIDs = thesaurusConceptRollen['ConceptID'].values
# Iterate over the column of thesaurus IDs
print("SNOMED -> VT vergelijken")
output = []
for thesaurusID in tqdm(list(set(thesaurusIDs))):
thesaurusConcept = thesaurusConcepten[
(thesaurusConcepten['ConceptID'] == thesaurusID) & (thesaurusConcepten['Einddatum'] == 20991231)
]
thesaurusTerm = thesaurusTermen[
(thesaurusTermen['ConceptID'] == thesaurusID) &
(thesaurusTermen['Einddatum'] == 20991231) &
(thesaurusTermen['TypeTerm'] == 'voorkeursterm')
]
try:
SCTID = int(thesaurusConcept['SnomedID'])
except:
SCTID = False
try:
term = thesaurusTerm['Omschrijving'].values[0]
except:
term = False
groepCode = thesaurusConceptRollen[
thesaurusConceptRollen['ConceptID'] == thesaurusID
]['SpecialismeGroepCode'].values[0]
in_ecl = (SCTID in deduplicated_list_ecl)
in_descendants = (SCTID in deduplicated_list_descendants)
output.append({
'ThesaurusID' : str(thesaurusID),
'Snomed ID' : str(SCTID),
'Snomed FSN' : fetchTerms(SCTID).get('fsn',{}).get('term',None),
'Voorkeursterm' : term,
'SpecialismeGroepCode' : str(groepCode),
'SCTID in refset': in_ecl,
'SCTID in descendants van refsetleden': in_descendants,
})
print("-"*80)
# Iterate over the refset members and check whether they are present in the VT
print("VT -> SNOMED vergelijken")
output2 = []
for SCTID in tqdm(deduplicated_list_total):
present = False
thesaurusTerm = False
vt_concept = False
vt_concept_specialisme = False
for ConceptID in thesaurusConcepten[(thesaurusConcepten['SnomedID'] == SCTID) & (thesaurusConcepten['Einddatum'] == 20991231)]['ConceptID']:
present = True
vt_concept = ConceptID
try:
thesaurusTerm = thesaurusTermen[
(thesaurusTermen['ConceptID'] == ConceptID) &
(thesaurusTermen['Einddatum'] == 20991231) &
(thesaurusTermen['TypeTerm'] == 'voorkeursterm')
]['Omschrijving'].values[0]
except:
continue
try:
vt_concept_specialisme = thesaurusConceptRollen[
(thesaurusConceptRollen['ConceptID'] == ConceptID) &
(thesaurusTermen['Einddatum'] == 20991231)
]['SpecialismeGroepCode'].values[0]
except:
continue
output2.append({
'Snomed ID' : str(SCTID),
'Snomed FSN' : fetchTerms(SCTID).get('fsn',{}).get('term',None),
'Refset lid' : (SCTID in deduplicated_list_ecl),
'Descendant van refsetlid' : (SCTID in deduplicated_list_descendants),
'ThesaurusID' : str(vt_concept),
'Voorkeursterm VT' : thesaurusTerm,
'SpecialismeGroepCode' : vt_concept_specialisme,
'SNOMED Concept in VT': present,
})
print("-"*80)
# Export to Excel
print("Exporteren naar excel")
export_comment = input("Opmerkingen voor in het output-bestand? ")
now = datetime.now()
date_time = now.strftime("%m-%d-%Y_%H:%M:%S")
writer = pd.ExcelWriter(f"output_{date_time}.xlsx", engine='xlsxwriter')
# Sheet 1 with metadata
metadata_df = pd.DataFrame([
{'key' : 'Scriptnaam', 'value' : os.path.basename(__file__)},
{'key' : 'Export time', 'value' : date_time},
{'key' : 'SNOMED versie', 'value' : snomed_versie},
{'key' : 'Snowstorm URL', 'value' : snowstorm_url},
{'key' : 'VT bronbestand[0]', 'value' : file_1},
{'key' : 'VT bronbestand[1]', 'value' : file_2},
{'key' : 'VT bronbestand[2]', 'value' : file_3},
{'key' : 'Opmerkingen', 'value' : export_comment},
])
metadata_df.to_excel(writer, 'Metadata')
# Sheet 2 with results - VT vs ECL
output_df = pd.DataFrame(output)
output_df.to_excel(writer, 'VT -> SNOMED')
# Sheet 3 with results - ECL vs VT
output_df = pd.DataFrame(output2)
output_df.to_excel(writer, 'SNOMED -> VT')
writer.save()
print("-"*80)
print("-"*80)
print(f"Klaar - download output_{date_time}.xlsx voor resultaten.")
print("-"*80)
print("-"*80)
|
from abc import abstractmethod
from collections import Counter
import numpy as np
from probability2 import Key
from probability2 import Distribution
class EmpiricalDistribution(Distribution):
def __init__(self, samples):
"""Construct an abstract distribution and count the number of
        occurrences of the items in the samples.
Args:
samples (iterable):
An iterable that contains the observed samples
or a dictionary of (key:frequency).
Raises:
ValueError: Raises when the provided sample is None.
"""
super().__init__()
if samples is None:
raise ValueError("samples argument is None.")
self._counter = Counter(samples)
# Elements count
self.total = sum(self._counter.values())
def _check_keys_consistencies_(self):
rv_len = self.get_random_variable().size
def compare_single_elements():
try:
keys = iter(self.keys())
first_row = next(keys)
except StopIteration: # Empty rows
return
first_row_type = type(first_row)
if isinstance(first_row, tuple):
# For single elements that are tuple
# we check both type and length
first_row_len = len(first_row)
for row in keys:
if not isinstance(row, first_row_type):
raise ValueError(
"The type of the 'factors' are not consistence."
)
if len(row) != first_row_len:
raise ValueError(
"The length of the 'factors' are not consistence."
)
else: # For other single elements, we just
# check the type
for row in keys:
if not isinstance(row, first_row_type):
raise ValueError(
"The type of the 'factors' are not consistence."
)
def compare_multilevel_elements():
# We suppose the keys were tuples
# and each Random Variable (RV) is positioned
# in a fix place of the n-tuple.
# Therefore, the levels of the RV can be
# found by iterating over each tuple's item
# Convert each features line to tuple
tuples = (Key(row) for row in self.keys())
try:
first_row = next(tuples)
except StopIteration: # Empty rows
return
first_row_len = len(first_row)
first_row_types = [type(item) for item in first_row]
for row in tuples:
                # compare lengths
if len(row) != first_row_len:
raise ValueError("The length of the 'factors' are not consistence.")
                # compare the type of each of the row's elements
                comparisons = [
                    isinstance(element, type_1)
                    for element, type_1 in zip(row, first_row_types)
                ]
                if not all(comparisons):
                    raise ValueError("The types of the 'factors' are not consistent.")
if rv_len > 1:
compare_multilevel_elements()
else:
compare_single_elements()
@staticmethod
def digitize(samples, start, stop, num=10, endpoint=True, right=False, levels=None):
"""[summary]
Args:
samples (numeric array):
                continuous values that need digitization.
start (numeric):
The starting value of the sequence.
stop (numeric):
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num (int, optional):
Number of samples to generate. Default is 10. Must be non-negative.
endpoint (bool, optional):
If True, `stop` is the last sample. Otherwise, it is not included.
Defaults to True.
right (bool):
Indicating whether the intervals include the right or the left bin
edge. Default behavior is (right==False) indicating that the interval
does not include the right edge. The left bin end is open in this
case, i.e., bins[i-1] <= x < bins[i] is the default behavior for
monotonically increasing bins.
levels (list, optional):
List of labels for each step. Defaults to None.
Returns:
numpy array:
                An array of the bins/levels of the digitized samples.
"""
bins = np.linspace(start, stop, num, endpoint=endpoint)
return EmpiricalDistribution.digitize_bin(samples, bins, right, levels)
@staticmethod
def digitize_bin(samples, bins, right=False, levels=None):
"""Return the digitized samples by returning the coresponding bins value.
When 'levels' is provided, bins values replace by levels.
========= ============= ============================
`right` order of bins returned index `i` satisfies
========= ============= ============================
``False`` increasing ``bins[i-1] <= x < bins[i]``
``True`` increasing ``bins[i-1] < x <= bins[i]``
``False`` decreasing ``bins[i-1] > x >= bins[i]``
``True`` decreasing ``bins[i-1] >= x > bins[i]``
========= ============= ============================
Args:
samples (numeric array):
                continuous values that need digitization.
bins (array):
Array of bins. It has to be 1-dimensional and monotonic.
right (bool):
Indicating whether the intervals include the right or
the left bin edge. Default behavior is (right==False)
indicating that the interval does not include the right
edge. The left bin end is open in this case, i.e.,
bins[i-1] <= x < bins[i] is the default behavior for
monotonically increasing bins.
levels (list, optional):
List of labels for each step. Defaults to None.
Raises:
ValueError:
Raises when the length of levels is not equal to
the length of bins minus one.
Returns:
numpy array:
An array of bins/levels of digitized samples.
"""
if levels is not None and len(levels) != len(bins) - 1:
raise ValueError(
f"'levels' length ({len(levels)}) is not "
f"equal to bins length-1 ({len(bins)-1})."
)
indices = np.digitize(samples, bins, right)
if levels is None:
# Extend the bins to include left outliers
delta_left = bins[1] - bins[0]
bins_extended = np.r_[[bins[0] - delta_left], bins]
return bins_extended[indices]
# Extend the levels to include outliers
levels_extended = np.r_[["less"], levels, ["more"]]
return levels_extended[indices]
def normalise(self):
"""Normalise the distribution."""
for k in self._counter:
self._counter[k] = self._counter[k] / self.total
self.total = 1.0
def probability(self, key):
"""Gets the probability of the random variable, when its value is 'key'.
        It returns zero if the value is not observed.
Args:
key (object):
the value of the random variable.
Returns:
float: probability of the random variable.
"""
if self.total == 0:
return 0
return self.__getitem__(key) / self.total
def freq(self, *args, **kwargs):
key = self.get_random_variable().to_key(*args, **kwargs)
return self.frequency(key, normalised=False)
def frequency(self, key, normalised=False):
"""Gets the frequency of the random variable, when its value is 'key'.
        It returns zero if the value is not observed.
Args:
key (object):
the value of the random variable.
normalised (bool, optional):
normalize the return. Defaults to False.
Returns:
int or float: frequency or probability of the random variable.
"""
if self.total == 0:
return 0
if normalised:
return self._counter[key] / self.total
else:
return self._counter[key]
def keys(self):
return self._counter.keys()
def items(self):
return self._counter.items()
def frequencies(self, normalised=True):
"""A list of frequencies of class occurenc.
Args:
normalised (bool, optional):
The normalisation flag. Defaults to True.
Returns:
list: A list of floats or integers, depending of normalisation.
"""
if self.total == 0:
return np.zeros(len(self._counter.keys()))
values = np.array(list(self._counter.values()))
if normalised:
return np.array(values) / self.total
return np.array(values)
def keys_as_list(self):
return [k for k in self._counter.keys()]
def most_common(self, num: int = None):
"""List the n most common elements and their counts from the most
common to the least. If n is None, then list all element counts.
Args:
num (int, optional):
The maximum length of the returned list. Defaults to None.
Returns:
list: A list of tuples. The first element of the tuple is a class
            key and the second one is its count.
"""
return self._counter.most_common(num)
@abstractmethod
def get_random_variable(self):
pass
@abstractmethod
def summary(self):
pass
@abstractmethod
def to_table(self, normalised=False, sort=False):
pass
@abstractmethod
def __mul__(self, that):
pass
@abstractmethod
def __rmul__(self, that):
pass
def __getitem__(self, key):
"""An indexer that returns the count of the class key.
Returns zero if the 'key' does not exist in samples or
the samples iterator is empty.
Args:
key (object):
The key that specifies the class name in samples.
Returns:
            float: The count of occurrences of class 'key'.
Returns zero if the 'key' does not exist in samples or
the samples iterator is empty.
"""
if self.total == 0:
return 0
if isinstance(key, slice):
return list(self._counter.items())[key]
return self._counter[key]
def __contains__(self, key):
return key in self._counter
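if __name__ == "__main__":
    # Illustrative sketch: the static digitize helper can be used on its own to map
    # continuous readings onto labelled levels. Sample values and labels are made up.
    readings = [0.05, 0.4, 0.75, 1.2]
    levels = EmpiricalDistribution.digitize(
        readings, start=0.0, stop=1.0, num=4, levels=["low", "mid", "high"]
    )
    print(levels)  # values below `start` map to "less", values >= `stop` map to "more"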
|
import numpy as np
import subprocess
from rich.console import Console
c = Console()
## ------------- FUNCTIONS ----------------
def create_temp_ini(pos:tuple,sampling:str,runs:int,blocklength:int):
content = f"""[DEFAULT]
[simulation]
runs = {runs}
blocks = {blocklength}
x0 = {pos[0]}
y0 = {pos[1]}
z0 = {pos[2]}
delta = 1
sampling = {sampling}
[settings]
test = true
logger_debug = true
data_debug = false
timeseries = false
"""
file = open("tempfile.ini","w+")
file.write(content)
file.close()
def clean():
subprocess.run("./clean.sh",shell=True)
### -------------- MAIN -------------------
samplings = ("normal","gauss")
starting_positions = (
(0,0,0),
(1,1,1),
(2,2,2),
(3,3,3),
(4,4,4),
(10,0,-10)
)
clean()
for i1, sampling in enumerate(samplings):
c.print(f" ---------------------------- {sampling} ---------------------------- ",style="bold blue on white")
for i2, start in enumerate(starting_positions):
print("Iteration: ",i2)
print("Starting position: ",start)
create_temp_ini(start,sampling,100000,100)
p = subprocess.Popen("make && ./main tempfile.ini", shell=True, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
out, err = p.communicate()
print("")
print("\n")
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from copy import deepcopy
import pytest
from superset.utils.core import (
AdhocColumn,
AdhocMetric,
ExtraFiltersReasonType,
ExtraFiltersTimeColumnType,
GenericDataType,
get_column_name,
get_column_names,
get_metric_name,
get_metric_names,
get_time_filter_status,
is_adhoc_metric,
NO_TIME_RANGE,
)
from tests.unit_tests.fixtures.datasets import get_dataset_mock
STR_METRIC = "my_metric"
SIMPLE_SUM_ADHOC_METRIC: AdhocMetric = {
"aggregate": "SUM",
"column": {
"column_name": "my_col",
"type": "INT",
"type_generic": GenericDataType.NUMERIC,
},
"expressionType": "SIMPLE",
"label": "my SUM",
}
SQL_ADHOC_METRIC: AdhocMetric = {
"expressionType": "SQL",
"label": "my_sql",
"sqlExpression": "SUM(my_col)",
}
STR_COLUMN = "my_column"
SQL_ADHOC_COLUMN: AdhocColumn = {
"hasCustomLabel": True,
"label": "My Adhoc Column",
"sqlExpression": "case when foo = 1 then 'foo' else 'bar' end",
}
def test_get_metric_name_saved_metric():
assert get_metric_name(STR_METRIC) == "my_metric"
assert get_metric_name(STR_METRIC, {STR_METRIC: "My Metric"}) == "My Metric"
def test_get_metric_name_adhoc():
metric = deepcopy(SIMPLE_SUM_ADHOC_METRIC)
assert get_metric_name(metric) == "my SUM"
assert get_metric_name(metric, {"my SUM": "My Irrelevant Mapping"}) == "my SUM"
del metric["label"]
assert get_metric_name(metric) == "SUM(my_col)"
metric["label"] = ""
assert get_metric_name(metric) == "SUM(my_col)"
del metric["aggregate"]
assert get_metric_name(metric) == "my_col"
metric["aggregate"] = ""
assert get_metric_name(metric) == "my_col"
assert get_metric_name(metric, {"my_col": "My Irrelevant Mapping"}) == "my_col"
metric = deepcopy(SQL_ADHOC_METRIC)
assert get_metric_name(metric) == "my_sql"
assert get_metric_name(metric, {"my_sql": "My Irrelevant Mapping"}) == "my_sql"
del metric["label"]
assert get_metric_name(metric) == "SUM(my_col)"
metric["label"] = ""
assert get_metric_name(metric) == "SUM(my_col)"
def test_get_metric_name_invalid_metric():
metric = deepcopy(SIMPLE_SUM_ADHOC_METRIC)
del metric["label"]
del metric["column"]
with pytest.raises(ValueError):
get_metric_name(metric)
metric = deepcopy(SIMPLE_SUM_ADHOC_METRIC)
del metric["label"]
metric["expressionType"] = "FOO"
with pytest.raises(ValueError):
get_metric_name(metric)
metric = deepcopy(SQL_ADHOC_METRIC)
del metric["label"]
metric["expressionType"] = "FOO"
with pytest.raises(ValueError):
get_metric_name(metric)
def test_get_metric_names():
assert get_metric_names(
[STR_METRIC, SIMPLE_SUM_ADHOC_METRIC, SQL_ADHOC_METRIC]
) == ["my_metric", "my SUM", "my_sql"]
assert get_metric_names(
[STR_METRIC, SIMPLE_SUM_ADHOC_METRIC, SQL_ADHOC_METRIC],
{STR_METRIC: "My Metric"},
) == ["My Metric", "my SUM", "my_sql"]
def test_get_column_name_physical_column():
assert get_column_name(STR_COLUMN) == "my_column"
    assert get_column_name(STR_COLUMN, {STR_COLUMN: "My Column"}) == "My Column"
def test_get_column_name_adhoc():
column = deepcopy(SQL_ADHOC_COLUMN)
assert get_column_name(column) == "My Adhoc Column"
assert (
get_column_name(column, {"My Adhoc Column": "My Irrelevant Mapping"})
== "My Adhoc Column"
)
del column["label"]
assert get_column_name(column) == "case when foo = 1 then 'foo' else 'bar' end"
column["label"] = ""
assert get_column_name(column) == "case when foo = 1 then 'foo' else 'bar' end"
def test_get_column_names():
assert get_column_names([STR_COLUMN, SQL_ADHOC_COLUMN]) == [
"my_column",
"My Adhoc Column",
]
assert get_column_names(
[STR_COLUMN, SQL_ADHOC_COLUMN],
{"my_column": "My Column"},
) == ["My Column", "My Adhoc Column"]
def test_get_column_name_invalid_column():
column = deepcopy(SQL_ADHOC_COLUMN)
del column["label"]
del column["sqlExpression"]
with pytest.raises(ValueError):
get_column_name(column)
def test_is_adhoc_metric():
assert is_adhoc_metric(STR_METRIC) is False
assert is_adhoc_metric(SIMPLE_SUM_ADHOC_METRIC) is True
assert is_adhoc_metric(SQL_ADHOC_METRIC) is True
def test_get_time_filter_status_time_col():
dataset = get_dataset_mock()
assert get_time_filter_status(
dataset, {ExtraFiltersTimeColumnType.TIME_COL: "ds"}
) == ([{"column": ExtraFiltersTimeColumnType.TIME_COL}], [])
def test_get_time_filter_status_time_range():
dataset = get_dataset_mock()
assert get_time_filter_status(
dataset, {ExtraFiltersTimeColumnType.TIME_RANGE: "1 year ago"}
) == ([{"column": ExtraFiltersTimeColumnType.TIME_RANGE}], [])
def test_get_time_filter_status_time_grain():
dataset = get_dataset_mock()
assert get_time_filter_status(
dataset, {ExtraFiltersTimeColumnType.TIME_GRAIN: "PT1M"}
) == ([{"column": ExtraFiltersTimeColumnType.TIME_GRAIN}], [])
def test_get_time_filter_status_no_temporal_col():
dataset = get_dataset_mock()
dataset.columns[0].is_dttm = False
assert get_time_filter_status(
dataset, {ExtraFiltersTimeColumnType.TIME_COL: "foobar"}
) == (
[],
[
{
"reason": ExtraFiltersReasonType.COL_NOT_IN_DATASOURCE,
"column": ExtraFiltersTimeColumnType.TIME_COL,
}
],
)
assert get_time_filter_status(
dataset, {ExtraFiltersTimeColumnType.TIME_RANGE: "1 year ago"}
) == (
[],
[
{
"reason": ExtraFiltersReasonType.NO_TEMPORAL_COLUMN,
"column": ExtraFiltersTimeColumnType.TIME_RANGE,
}
],
)
assert get_time_filter_status(
dataset, {ExtraFiltersTimeColumnType.TIME_GRAIN: "PT1M"}
) == (
[],
[
{
"reason": ExtraFiltersReasonType.NO_TEMPORAL_COLUMN,
"column": ExtraFiltersTimeColumnType.TIME_GRAIN,
}
],
)
|
# Generated by Django 2.2.5 on 2019-10-09 12:51
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, verbose_name='Name')),
('date', models.DateField(verbose_name='Date')),
('price', models.PositiveIntegerField(default=0, verbose_name='Price')),
('unit', models.IntegerField(choices=[(0, 'шт.'), (1, 'кг.')], default=0, verbose_name='Unit')),
('provider', models.CharField(blank=True, max_length=128, verbose_name='Provider')),
],
options={
'verbose_name': 'Product',
'verbose_name_plural': 'Products',
},
),
]
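
# --- Hedged sketch (not part of this migration): the model definition that
# this initial migration corresponds to, read off the CreateModel operation
# above. Left commented out so the migration module stays importable.
#
# class Product(models.Model):
#     UNIT_CHOICES = [(0, 'шт.'), (1, 'кг.')]
#
#     name = models.CharField('Name', max_length=128)
#     date = models.DateField('Date')
#     price = models.PositiveIntegerField('Price', default=0)
#     unit = models.IntegerField('Unit', choices=UNIT_CHOICES, default=0)
#     provider = models.CharField('Provider', max_length=128, blank=True)
#
#     class Meta:
#         verbose_name = 'Product'
#         verbose_name_plural = 'Products'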
|
from __future__ import absolute_import, print_function
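# Python 2/3 compatibility shims: prefer the standard-library pathlib,
# tempfile.TemporaryDirectory and FileNotFoundError where they exist, and fall
# back to the pathlib2 / backports.tempfile packages or IOError otherwise.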
try:
from pathlib import Path
Path().expanduser()
except (ImportError,AttributeError):
from pathlib2 import Path
try:
import tempfile
tempfile.TemporaryDirectory
except (ImportError,AttributeError):
from backports import tempfile
try:
FileNotFoundError = FileNotFoundError
except NameError:
FileNotFoundError = IOError
|
from selenium import webdriver
from selenium.webdriver.support.ui import Select
import time
import autoit
# Between the quotes, enter your credentials. Make sure there are no extra spaces.
username = ""
password = ""
# Between the quotes, enter the time you come in and the time you leave. Don't forget AM and PM.
timeIn = "9:00 AM"
timeOut = "6:00 PM"
# Lunch break
lunch = "1:00"
browser = webdriver.Chrome()
browser.set_page_load_timeout(30)
browser.get("https://my.springahead.com/go/Account/Logon/REPLACE_WITH_COMPANY'S_NAME/")
userID = browser.find_elements_by_id("UserName")
userID[0].send_keys(username)
browser.find_element_by_id("Password").send_keys(password)
browser.find_element_by_class_name("submit").click()
browser.find_element_by_class_name("small").click()
# SpringAhead renders each workday as a tbody; the odd nth-child indices
# 3, 5, 7, 9, 11 appear to correspond to the five weekday rows, hence the step of 2.
workday = 3
while workday <= 11:
    try:
        browser.find_element_by_css_selector("tbody.timedaySectionBody:nth-child("+str(workday)+") > tr:nth-child(2) > td:nth-child(2) > "
                                             "div:nth-child(1) > button:nth-child(1)").click()
        select = Select(browser.find_element_by_css_selector("#timedayTable > tbody:nth-child("+str(workday)+") > "
                                                             "tr.timeRowModel.editor_content.timedayEdit.timedayCalc > "
                                                             "td.timedayProject > select"))
        select.select_by_index(1)
        browser.find_element_by_class_name("timein_input").send_keys(timeIn)
        browser.find_element_by_class_name("timeout_input").send_keys(timeOut)
        browser.find_element_by_class_name("timebreak_input").send_keys(lunch)
        browser.find_element_by_css_selector("tr.timedayEdit:nth-child(5) > td:nth-child(5) > button:nth-child(1)").click()
    except Exception:
        # Skip days that are already filled in or not editable instead of crashing.
        pass
    time.sleep(0.5)
    workday += 2
# Clicks submit button
browser.find_element_by_id("submitall").click()
time.sleep(1)
# Does the final submission for the timecard
autoit.send("{ENTER}")
time.sleep(10)
browser.quit()
|
import os
from exps.basketball.basketball_yolox_l import Exp as MyExp
class Exp(MyExp):
def __init__(self):
super().__init__()
self.num_classes = 2
self.exp_name = "boxing_glove"
self.basketball_detection_dir = "2022"
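
# Hedged usage note (assumption, not part of the original file): an Exp module
# like this is typically handed to the YOLOX training entry point via its -f
# flag, e.g.
#   python -m yolox.tools.train -f exps/boxing_glove/exp.py -d 1 -b 8 --fp16
# The exact path and batch settings above are illustrative only.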
|