Dataset schema (one row per source file):

| column | type | range / notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 value |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
---

hexsha: a543b635a30f628934cc8f9195a488cc475f1ee1 | size: 7,295 | ext: py | lang: Python
max_stars_repo: bqplot/traits.py @ maartenbreddels/bqplot (head cbd37f7acf94c8dddf929e9d2485d2b102ce49b9, licenses ["Apache-2.0"]) | stars: 4 | 2020-12-17T21:19:00.000Z to 2021-09-22T04:09:11.000Z
max_issues_repo: bqplot/traits.py @ maartenbreddels/bqplot (head cbd37f7acf94c8dddf929e9d2485d2b102ce49b9, licenses ["Apache-2.0"]) | issues: null
max_forks_repo: bqplot/traits.py @ maartenbreddels/bqplot (head cbd37f7acf94c8dddf929e9d2485d2b102ce49b9, licenses ["Apache-2.0"]) | forks: 1 | 2021-08-29T09:38:02.000Z to 2021-08-29T09:38:02.000Z
content:
# Copyright 2015 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
============
Traits Types
============
.. currentmodule:: bqplot.traits
.. autosummary::
:toctree: _generate/
Date
"""
from traitlets import Instance, TraitError, TraitType, Undefined
import traittypes as tt
import numpy as np
import pandas as pd
import warnings
import datetime as dt
# Date
def date_to_json(value, obj):
if value is None:
return value
else:
return value.strftime('%Y-%m-%dT%H:%M:%S.%f')
def date_from_json(value, obj):
if value:
return dt.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%f')
else:
return value
date_serialization = dict(to_json=date_to_json, from_json=date_from_json)
class Date(TraitType):
"""
A datetime trait type.
Converts the passed date into a string format that can be used to
construct a JavaScript datetime.
"""
def validate(self, obj, value):
try:
if isinstance(value, dt.datetime):
return value
if isinstance(value, dt.date):
return dt.datetime(value.year, value.month, value.day)
if np.issubdtype(np.dtype(value), np.datetime64):
# TODO: Fix this. Right now, we have to limit the precision
# of time to microseconds because np.datetime64.astype(datetime)
# returns date values only for precision <= 'us'
value_truncated = np.datetime64(value, 'us')
return value_truncated.astype(dt.datetime)
except Exception:
self.error(obj, value)
self.error(obj, value)
def __init__(self, default_value=dt.datetime.today(), **kwargs):
args = (default_value,)
self.default_value = default_value
super(Date, self).__init__(args=args, **kwargs)
self.tag(**date_serialization)
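# Minimal round-trip sketch for the Date trait's serializers above (illustrative values
# only; the second argument of to_json/from_json is unused here, so None is passed):
#
#   >>> d = dt.datetime(2021, 3, 14, 15, 9, 26)
#   >>> s = date_to_json(d, None)        # '2021-03-14T15:09:26.000000'
#   >>> date_from_json(s, None) == d
#   True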
def convert_to_date(array, fmt='%m-%d-%Y'):
# If array is a np.ndarray with type == np.datetime64, the array can be
# returned as such. If it is an np.ndarray of dtype 'object' then conversion
# to string is tried according to the fmt parameter.
if(isinstance(array, np.ndarray) and np.issubdtype(array.dtype, np.datetime64)):
# no need to perform any conversion in this case
return array
elif(isinstance(array, list) or (isinstance(array, np.ndarray) and array.dtype == 'object')):
return_value = []
# Pandas to_datetime handles all the cases where the passed in
# data could be any of the combinations of
# [list, nparray] X [python_datetime, np.datetime]
# Because of errors='coerce', any non-compatible datetime type
# will be converted to pd.NaT. By this comparison, we can figure
# out if it is date castable or not.
if(len(np.shape(array)) == 2):
for elem in array:
temp_val = pd.to_datetime(
elem, errors='coerce', box=False, infer_datetime_format=True)
temp_val = elem if (
temp_val[0] == np.datetime64('NaT')) else temp_val
return_value.append(temp_val)
elif(isinstance(array, list)):
temp_val = pd.to_datetime(
array, errors='coerce', box=False, infer_datetime_format=True)
return_value = array if (
temp_val[0] == np.datetime64('NaT')) else temp_val
else:
temp_val = pd.to_datetime(
array, errors='coerce', box=False, infer_datetime_format=True)
temp_val = array if (
temp_val[0] == np.datetime64('NaT')) else temp_val
return_value = temp_val
return return_value
elif(isinstance(array, np.ndarray)):
warnings.warn("Array could not be converted into a date")
return array
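# Behaviour sketch for convert_to_date (illustrative; assumes a pandas version old
# enough to accept the box=False keyword used above):
#
#   >>> convert_to_date(['2016-04-04', '2016-04-05'])        # castable -> datetime64[ns] array
#   >>> convert_to_date(np.array(['a', 'b'], dtype=object))  # not castable -> returned unchanged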
# Array
def array_from_json(value, obj=None):
if value is not None:
if value.get('values') is not None:
dtype = {
'date': np.datetime64,
'float': np.float64
}.get(value.get('type'), object)
return np.asarray(value['values'], dtype=dtype)
def array_to_json(a, obj=None):
if a is not None:
if np.issubdtype(a.dtype, np.float):
# replace nan with None
dtype = 'float'
a = np.where(np.isnan(a), None, a)
elif a.dtype in (int, np.int64):
dtype = 'float'
a = a.astype(np.float64)
elif np.issubdtype(a.dtype, np.datetime64):
dtype = 'date'
a = a.astype(np.str).astype('object')
for x in np.nditer(a, flags=['refs_ok'], op_flags=['readwrite']):
# for every element in the nd array, forcing the conversion into
# the format specified here.
temp_x = pd.to_datetime(x.flatten()[0])
if pd.isnull(temp_x):
x[...] = None
else:
x[...] = temp_x.to_pydatetime().strftime(
'%Y-%m-%dT%H:%M:%S.%f')
else:
dtype = a.dtype
return dict(values=a.tolist(), type=str(dtype))
else:
return dict(values=a, type=None)
array_serialization = dict(to_json=array_to_json, from_json=array_from_json)
# array validators
def array_squeeze(trait, value):
if len(value.shape) > 1:
return np.squeeze(value)
else:
return value
def array_dimension_bounds(mindim=0, maxdim=np.inf):
def validator(trait, value):
dim = len(value.shape)
if dim < mindim or dim > maxdim:
raise TraitError('Dimension mismatch for trait %s of class %s: expected an \
array of dimension comprised in interval [%s, %s] and got an array of shape %s'\
% (trait.name, trait.this_class, mindim, maxdim, value.shape))
return value
return validator
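# Usage sketch (illustrative): these validators are meant to be chained onto a
# traittypes Array trait, e.g.
#
#   >>> xy = tt.Array().valid(array_squeeze, array_dimension_bounds(1, 2))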
# DataFrame
def dataframe_from_json(value, obj):
if value is None:
return None
else:
return pd.DataFrame(value)
def dataframe_to_json(df, obj):
if df is None:
return None
else:
return df.to_dict(orient='records')
dataframe_serialization = dict(to_json=dataframe_to_json, from_json=dataframe_from_json)
# dataframe validators
def dataframe_warn_indexname(trait, value):
if value.index.name is not None:
warnings.warn("The '%s' dataframe trait of the %s instance disregards the index name" % (trait.name, trait.this_class))
value = value.reset_index()
return value
# Series
def series_from_json(value, obj):
return pd.Series(value)
def series_to_json(value, obj):
return value.to_dict()
series_serialization = dict(to_json=series_to_json, from_json=series_from_json)
avg_line_length: 33.930233 | max_line_length: 127 | alphanum_fraction: 0.619465

---

hexsha: d09f12c7f4bd1063daf03277d1a09458697766e8 | size: 5,188 | ext: py | lang: Python
max_stars_repo: Components/PostScreen.py @ CMPUT-291-Miniproject/Miniproject-2 (head 439d1e555117d1882c3ece37b04016ca06739100, licenses ["MIT"]) | stars: null
max_issues_repo: Components/PostScreen.py @ CMPUT-291-Miniproject/Miniproject-2 (head 439d1e555117d1882c3ece37b04016ca06739100, licenses ["MIT"]) | issues: null
max_forks_repo: Components/PostScreen.py @ CMPUT-291-Miniproject/Miniproject-2 (head 439d1e555117d1882c3ece37b04016ca06739100, licenses ["MIT"]) | forks: null
content:
from Components.Terminal import Terminal
from Components.Post import Post
class PostScreen:
"""
User interface for generating and posting a question.
Uses the framework from Post.py to enter the question into the database, and Terminal for extra UI commands.
"""
def __init__(self, uid=None):
"""
Creates an instance of PostScreen, which is used in Main.py (subject to change)
Parameters:
uid: User ID of the poster. Needed for adding question to database.
Additional Init. Variables:
__body__: Post Object. Instance of the posting framework, used to add questions and answers to the database.
Returns: N/A
"""
self.__body__ = Post(uid)
def printQuestionScreen(self):
"""
User interface for adding a question to the database. This method validates all user input before passing it to Post.py, which actually adds the entry to the database.
Parameters: N/A
Returns: N/A
"""
repeat = True
#Main input loop, runs until user input is valid, or they quit out of the menu.
while repeat:
Terminal.clear()
Terminal.printCenter("---POSTING QUESTION---")
Terminal.printCenter("To go back without posting a question, input BACK during any prompt.")
print('\n')
#get title of post
title = input("Please enter the title of the question: ")
if title.lower().strip() == "back":
return
print('\n')
#Get body of post
body = input("Please enter the description of the question: ")
if body.lower().strip() == "back":
return
print('\n')
#get tags of the post
tag_input = input("Please enter the tags for this post, delimited by commas: ")
#Formats the tag input for addition to the db. tags is the edited version of tag_input.
tags = tag_input.lower().strip()
if tags == "back":
return
tags = tags.split(',')
if len(tag_input) == 0:
tags = None
else:
final_tags = []
for i in range(len(tags)):
prev_tag = tags[i].strip()
new_tag = ""
for character in prev_tag:
if character.isspace():
new_tag += "-"
else:
new_tag += character
if len(new_tag) > 0:
final_tags.append(new_tag)
#input validation loop. breaks if input is Y or N.
while True:
Terminal.clear()
#prints the title and body
Terminal.printCenter("Title: "+title)
Terminal.printCenter("Body: "+body)
#prints the tags if applicable
if tags:
Terminal.printCenter("Tags: "+tag_input)
else:
Terminal.printCenter("Tags: N/A")
print("\n")
choice = input("Is this what you want to post? (Y or N): ")
#if the user gives the ok to post, break out of all loops and add the post
if choice.upper() == "Y":
repeat = False
break
#if the user wants to change their post, repeat main loop but break this input loop
elif choice.upper() == 'N':
input("Press enter to continue:")
return None
#Any other input repeats the input loop
else:
input("Invalid input. Press enter to continue.")
#adds the question to the database and alerts the user that the operation was a success.
self.__body__.add_post(title, body, final_tags)
input("Your question has been posted. Press enter to return back to the main menue.")
return
def printAnswerScreen(self, pid):
"""
User interface for adding an answer to the database. This method validates all user input before passing it to Post.py, which actually adds the entry to the database.
Parameters: N/A
Returns: N/A
"""
#TODO:
repeat = True
#Main input loop, runs until user input is valid, or they quit out of the menu.
while repeat:
Terminal.printCenter("---POSTING ANSWER---")
Terminal.printCenter("To go back without posting a question, input BACK during any prompt.")
print('\n')
#gets the title and body of the post in a tuple
info = self.__body__.get_info(pid)
print("---ORIGINAL QUESTION---")
print(info[0])
print(info[1])
print('\n')
#Get body of post
body = input("Please enter your answer: ")
if body.lower().strip() == "back":
return
#input validation loop. breaks if input is Y or N.
while True:
Terminal.clear()
Terminal.printCenter(body)
print("\n")
choice = input("Is this what you want to post? (Y or N): ")
#if the user gives the ok to post, break out of all loops and add the post
if choice.upper() == "Y":
repeat = False
break
#if the user wants to change their post, repeat main loop but break this input loop
elif choice.upper() == 'N':
input("Press enter to continue: ")
return None
#Any other input repeats the input loop
else:
input("Invalid input. Press enter to continue.")
#adds the answer to the database and alerts the user that the operation was a success.
self.__body__.add_post(None, body, None, pid)
input("Your answer has been posted. Press enter to return back to the main menue.")
return
if __name__ == "__main__":
postQuestionScreen = PostScreen()
postQuestionScreen.printQuestionScreen()
avg_line_length: 28.98324 | max_line_length: 177 | alphanum_fraction: 0.660563

---

hexsha: 3a278cbca58e46a4e4b14ae2ccf04ef78af77a2e | size: 2,828 | ext: py | lang: Python
max_stars_repo: get_score/full_para.py @ Spritea/pytorch-semseg-fp16-one-titan (head 33dcd61b1f703f52a5f7ba4ac758c3a1726e4273, licenses ["MIT"]) | stars: null
max_issues_repo: get_score/full_para.py @ Spritea/pytorch-semseg-fp16-one-titan (head 33dcd61b1f703f52a5f7ba4ac758c3a1726e4273, licenses ["MIT"]) | issues: null
max_forks_repo: get_score/full_para.py @ Spritea/pytorch-semseg-fp16-one-titan (head 33dcd61b1f703f52a5f7ba4ac758c3a1726e4273, licenses ["MIT"]) | forks: null
content:
import cv2 as cv
from get_score import util
import numpy as np
import time
from pathlib import Path
import natsort
from get_score.metrics_my import runningScore
from tqdm import tqdm
from multiprocessing.dummy import Pool as ThreadPool
import itertools
def load_image(path):
image = cv.cvtColor(cv.imread(path, 1), cv.COLOR_BGR2RGB)
return image
Tensor_Path = Path("/home/spl03/code/pytorch-semseg-fp16/test_out/vaihingen/data07_or_08/numpy_softmax/combined_numpy/deeplabv3")
Tensor_File = natsort.natsorted(list(Tensor_Path.glob("*.npy")), alg=natsort.PATH)
Tensor_Str = []
for j in Tensor_File:
Tensor_Str.append(str(j))
GT_Path = Path("/home/spl03/code/pytorch-semseg-fp16/get_score/Vaihingen/val_gt_full")
GT_File = natsort.natsorted(list(GT_Path.glob("*.tif")), alg=natsort.PATH)
GT_Str = []
for j in GT_File:
GT_Str.append(str(j))
prefix='/home/spl03/code/pytorch-semseg-fp16/get_score/Vaihingen/PR/deeplabv3'
# th=0.98
th=list(np.arange(0,0.99,0.02))
th.extend([0.99,0.999,0.9999,0.99999])
pre=[]
rec=[]
t = time.time()
running_metrics_val = runningScore(2)
# label_values = [[0, 0, 0], [250, 250, 250]]
# label_values = [[255, 255, 255], [0, 0, 255], [0, 255, 255], [0, 255, 0], [255, 255, 0], [255, 0, 0],[0,0,0]]
# Note: do not include [0,0,0]; this is local evaluation.
label_values = [[255, 255, 255], [0, 0, 255], [0, 255, 255], [0, 255, 0], [255, 255, 0], [255, 0, 0]]
object_class=0
def compute_one(img_path,gt_path):
gt = load_image(gt_path)
# val_gt_erode paired with [0,0,0]label value
# label order: R G B
# num_classes = len(label_values)
gt = util.reverse_one_hot(util.one_hot_it(gt, label_values))
gt_binary = np.zeros(gt.shape, dtype=np.uint8)
gt_binary[gt == object_class] = 1
output_image = img_path
running_metrics_val.update(gt_binary, output_image)
def backbone(k,th):
lanes = np.load(Tensor_Str[k])
lanes_one_channel = lanes[:, :, object_class]
height, width = lanes_one_channel.shape
pred = np.zeros((height, width), dtype=np.uint8)
# pred[lanes_one_channel > th] = 4
pred[lanes_one_channel > th] = 1
# pred_3chan=np.repeat(pred.reshape(height, width, 1), 3, axis=2)
compute_one(pred, GT_Str[k])
def full_one(th):
running_metrics_val.reset()
pool=ThreadPool(26)
pool.starmap(backbone,zip(range(len(Tensor_Str)),itertools.repeat(th)))
pool.close()
pool.join()
acc, cls_pre, cls_rec, cls_f1, cls_iu, hist = running_metrics_val.get_scores()
pre.append(cls_pre[1])
rec.append(cls_rec[1])
print("cls pre")
print(cls_pre)
print("cls rec")
print(cls_rec)
for item in th:
full_one(item)
tt = time.time() - t
pre_path=prefix+'/'+'pre_'+str(object_class)+'.npy'
rec_path=prefix+'/'+'rec_'+str(object_class)+'.npy'
np.save(pre_path,pre)
np.save(rec_path,rec)
print("time: %f" %tt)
avg_line_length: 30.408602 | max_line_length: 129 | alphanum_fraction: 0.694837

---

hexsha: 0c8402d698ba06e390d5810e9c18f112b2da581d | size: 16,485 | ext: py | lang: Python
max_stars_repo: OLDER_MSEplots_metpy/MSEplots/plots.py @ brianmapes/MSEplot (head 3f3da5019544eb89638dfb804528191eb5cb0879, licenses ["MIT"]) | stars: 3 | 2018-07-16T07:42:57.000Z to 2020-05-30T23:11:05.000Z
max_issues_repo: OLDER_MSEplots_metpy/MSEplots/plots.py @ brianmapes/MSEplot (head 3f3da5019544eb89638dfb804528191eb5cb0879, licenses ["MIT"]) | issues: 4 | 2018-11-10T22:04:18.000Z to 2018-12-13T10:50:43.000Z
max_forks_repo: OLDER_MSEplots_metpy/MSEplots/plots.py @ brianmapes/MSEplot (head 3f3da5019544eb89638dfb804528191eb5cb0879, licenses ["MIT"]) | forks: null
content:
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
import metpy.calc as mpcalc
from metpy.units import units
from metpy.constants import Cp_d,Lv,Rd,g
def find_nearest(array, value):
array = np.asarray(array)
idx = (np.abs(array - value)).argmin() # find minimum
return (idx)
def thermo_plots(pressure,temperature,mixing_ratio):
""""
plots for vertical profiles of temperature, dewpoint, mixing ratio and relative humidity.
Parameters
----------
pressure : array-like
Atmospheric pressure profile (surface to TOA)
temperature: array-like
Atmospheric temperature profile (surface to TOA)
mixing_ratio: array-like
Atmospheric mixing-ratio profile in kg/kg (surface to TOA)
Returns
-------
"""
p = pressure*units('mbar')
q = mixing_ratio*units('kilogram/kilogram')
T = temperature*units('degC')
Td = mpcalc.dewpoint_from_specific_humidity(q,T,p) # dewpoint
Tp = mpcalc.parcel_profile(p,T[0],Td[0]) # parcel
plt.figure(figsize = (12,5))
lev = find_nearest(p.magnitude,100);
plt.subplot(1,3,1)
plt.plot(T[:lev],p[:lev],'-ob')
plt.plot(Td[:lev],p[:lev],'-og')
plt.plot(Tp[:lev],p[:lev],'-or')
plt.xlabel('Temperature [C]',fontsize=12)
plt.ylabel('Pressure [hpa]',fontsize=12)
plt.gca().invert_yaxis()
plt.legend(['Temp','Temp_Dew','Temp_Parcel'],loc=1)
plt.grid()
qs = mpcalc.mixing_ratio(mpcalc.saturation_vapor_pressure(T),p)
# Relative humidity
RH = q/qs*100 # Relative humidity
plt.subplot(1,3,2)
plt.plot(q[:lev],p[:lev],'-og')
plt.xlabel('Mixing ratio [kg/kg]',fontsize=12)
plt.gca().invert_yaxis()
plt.grid()
plt.subplot(1,3,3)
plt.plot(RH[:lev],p[:lev],'-og')
plt.xlabel('Relative humidity [%]',fontsize=12)
plt.gca().invert_yaxis()
plt.grid()
plt.tight_layout()
return (plt)
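# Usage sketch (illustrative; p_obs, T_obs and q_obs are hypothetical 1-D sounding
# profiles ordered surface to TOA, in mbar, degC and kg/kg respectively):
#
#   >>> plt_obj = thermo_plots(p_obs, T_obs, q_obs)
#   >>> plt_obj.show()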
def theta_plots(pressure,temperature,mixing_ratio):
"""
plots for vertical profiles of potential temperature, equivalent potential temperature,
and saturated equivalent potential temperature
"""
p = pressure*units('mbar')
T = temperature*units('degC')
q = mixing_ratio*units('kilogram/kilogram')
lev = find_nearest(p.magnitude,100)
Td = mpcalc.dewpoint(mpcalc.vapor_pressure(p,q)) # dewpoint
theta = mpcalc.potential_temperature(p,T)
theta_e = mpcalc.equivalent_potential_temperature(p,T,Td)
theta_es = mpcalc.equivalent_potential_temperature(p,T,T)
plt.figure(figsize=(7,7))
plt.plot(theta[:lev],p[:lev],'-ok')
plt.plot(theta_e[:lev],p[:lev],'-ob')
plt.plot(theta_es[:lev],p[:lev],'-or')
plt.xlabel('Temperature [K]',fontsize=12)
plt.ylabel('Pressure [hpa]',fontsize=12)
plt.gca().invert_yaxis()
plt.legend(['$\\theta$','$\\theta_e$','$\\theta_{es}$'],loc=1)
plt.grid()
return (plt)
def msed_plots(pressure,temperature,mixing_ratio,h0_std=2000,ensemble_size=20,ent_rate=np.arange(0,2,0.05),
entrain=False):
"""
plotting the summarized static energy diagram with annotations and thermodynamic parameters
"""
p = pressure*units('mbar')
T = temperature*units('degC')
q = mixing_ratio*units('kilogram/kilogram')
qs = mpcalc.mixing_ratio(mpcalc.saturation_vapor_pressure(T),p)
Td = mpcalc.dewpoint(mpcalc.vapor_pressure(p,q)) # dewpoint
Tp = mpcalc.parcel_profile(p,T[0],Td[0]).to('degC') # parcel profile
# Altitude based on the hydrostatic eq.
altitude = np.zeros((np.size(T)))*units('meter') # surface is 0 meter
for i in range(np.size(T)):
altitude[i] = mpcalc.thickness_hydrostatic(p[:i+1],T[:i+1]) # Hypsometric Eq. for height
# Static energy calculations
mse = mpcalc.moist_static_energy(altitude,T,q)
mse_s = mpcalc.moist_static_energy(altitude,T,qs)
dse = mpcalc.dry_static_energy(altitude,T)
# Water vapor calculations
p_PWtop = max(200*units.mbar, min(p) + 1*units.mbar) # integrating until 200mb
cwv = mpcalc.precipitable_water(Td,p,top=p_PWtop) # column water vapor [mm]
cwvs = mpcalc.precipitable_water(T,p,top=p_PWtop) # saturated column water vapor [mm]
crh = (cwv/cwvs)*100. # column relative humidity [%]
#================================================
# plotting MSE vertical profiles
fig = plt.figure(figsize=[12,8])
ax = fig.add_axes([0.1,0.1,0.6,0.8])
ax.plot(dse,p,'-k',linewidth=2)
ax.plot(mse,p,'-b',linewidth=2)
ax.plot(mse_s,p,'-r',linewidth=2)
# mse based on different percentages of relative humidity
qr = np.zeros((9,np.size(qs)))*units('kilogram/kilogram'); mse_r = qr*units('joule/kilogram')# container
for i in range(9):
qr[i,:] = qs*0.1*(i+1)
mse_r[i,:] = mpcalc.moist_static_energy(altitude,T,qr[i,:])
for i in range(9):
ax.plot(mse_r[i,:],p[:],'-',color='grey',linewidth=0.7)
ax.text(mse_r[i,3].magnitude/1000-1,p[3].magnitude,str((i+1)*10))
# drawing LCL and LFC levels
[lcl_pressure, lcl_temperature] = mpcalc.lcl(p[0], T[0], Td[0])
lcl_idx = np.argmin(np.abs(p.magnitude - lcl_pressure.magnitude))
[lfc_pressure, lfc_temperature] = mpcalc.lfc(p,T,Td)
lfc_idx = np.argmin(np.abs(p.magnitude - lfc_pressure.magnitude))
# conserved mse of air parcel arising from 1000 hpa
mse_p = np.squeeze(np.ones((1,np.size(T)))*mse[0].magnitude)
# illustration of CAPE
el_pressure,el_temperature = mpcalc.el(p,T,Td) # equilibrium level
el_idx = np.argmin(np.abs(p.magnitude - el_pressure.magnitude))
ELps = [el_pressure.magnitude] # Initialize an array of EL pressures for detrainment profile
[CAPE,CIN] = mpcalc.cape_cin(p[:el_idx],T[:el_idx],Td[:el_idx],Tp[:el_idx])
plt.plot(mse_p,p,color='green',linewidth=2)
ax.fill_betweenx(p[lcl_idx:el_idx+1],mse_p[lcl_idx:el_idx+1],mse_s[lcl_idx:el_idx+1],interpolate=True
,color='green',alpha='0.3')
ax.fill_betweenx(p,dse,mse,color='deepskyblue',alpha='0.5')
ax.set_xlabel('Specific static energies: s, h, hs [kJ kg$^{-1}$]',fontsize=14)
ax.set_ylabel('Pressure [hpa]',fontsize=14)
ax.set_xticks([280,300,320,340,360,380])
ax.set_xlim([280,390])
ax.set_ylim(1030,120)
if entrain is True:
# Depict Entraining parcels
# Parcel mass solves dM/dz = eps*M, solution is M = exp(eps*Z)
# M=1 at ground without loss of generality
# Distribution of surface parcel h offsets
H0STDEV = h0_std # J/kg
h0offsets = np.sort(np.random.normal(0, H0STDEV, ensemble_size))*units('joule/kilogram')
# Distribution of entrainment rates
entrainment_rates = ent_rate /(units('km'))
for h0offset in h0offsets:
h4ent = mse.copy(); h4ent[0] += h0offset;
for eps in entrainment_rates:
M = np.exp(eps * (altitude-altitude[0])).to('dimensionless')
# dM is the mass contribution at each level, with 1 at the origin level.
M[0] = 0
dM = np.gradient(M)
# parcel mass is a sum of all the dM's at each level
# conserved linearly-mixed variables like h are weighted averages
hent = np.cumsum(dM*h4ent) / np.cumsum(dM)
# Boolean for positive buoyancy, and its topmost altitude (index) where the curve is clipped
posboy = (hent > mse_s); posboy[0] = True # so there is always a detrainment level
ELindex_ent = np.max(np.where(posboy))
# Plot the curve
plt.plot( hent[0:ELindex_ent+2], p[0:ELindex_ent+2], linewidth=0.25, color='g')
# Keep a list for a histogram plot (detrainment profile)
if p[ELindex_ent].magnitude < lfc_pressure.magnitude: # buoyant parcels only
ELps.append( p[ELindex_ent].magnitude )
# Plot a crude histogram of parcel detrainment levels
NBINS = 20
pbins = np.linspace(1000,150,num=NBINS) # pbins for detrainment levels
hist = np.zeros((len(pbins)-1))
for x in ELps:
for i in range(len(pbins)-1):
if (x < pbins[i]) & (x >= pbins[i+1]):
hist[i] += 1;break
det_per = hist/sum(hist)*100; # percentages of detrainment ensembles at levels
ax2 = fig.add_axes([0.705,0.1,0.1,0.8],facecolor=None)
ax2.barh( pbins[1:], det_per, color='lightgrey',edgecolor='k',height=15*(20/NBINS))
ax2.set_xlim([0,max(det_per)])
ax2.set_ylim([1030,120])
ax2.set_xlabel('Detrainment [%]')
ax2.grid()
ax2.set_zorder(2)
ax.plot( [400,400], [1100,0])
ax.annotate('Detrainment', xy=(362,320), color='dimgrey')
ax.annotate('ensemble: ' + str(ensemble_size*len(entrainment_rates)), xy=(364, 340), color='dimgrey')
ax.annotate('Detrainment', xy=(362,380), color='dimgrey')
ax.annotate(' scale: 0 - 2 km', xy=(365,400), color='dimgrey')
# Overplots on the mess: undilute parcel and CAPE, etc.
ax.plot( (1,1)*mse[0], (1,0)*(p[0]), color='g',linewidth=2)
# Replot the sounding on top of all that mess
ax.plot(mse_s , p, color='r', linewidth=1.5)
ax.plot(mse , p, color='b', linewidth=1.5)
# label LCL and LFC
ax.plot((mse_s[lcl_idx]+(-2000,2000)*units('joule/kilogram')), lcl_pressure+(0,0)*units('mbar') ,color='orange',linewidth=3)
ax.plot((mse_s[lfc_idx]+(-2000,2000)*units('joule/kilogram')), lfc_pressure+(0,0)*units('mbar') , color='magenta',linewidth=3)
### Internal waves (100m adiabatic displacements, assumed adiabatic: conserves s, sv, h).
#dZ = 100 *mpunits.units.meter
dp = 1000*units.pascal
# depict displacements at sounding levels nearest these target levels
targetlevels = [900,800,700,600,500,400,300,200]*units.hPa
for ilev in targetlevels:
idx = np.argmin(np.abs(p - ilev))
# dp: hydrostatic
rho = (p[idx])/Rd/(T[idx])
dZ = -dp/rho/g
# dT: Dry lapse rate dT/dz_dry is -g/Cp
dT = (-g/Cp_d *dZ).to('kelvin')
Tdisp = T[idx].to('kelvin') + dT
# dhsat
dqs = mpcalc.mixing_ratio(mpcalc.saturation_vapor_pressure(Tdisp) ,p[idx]+dp) - qs[idx]
dhs = g*dZ + Cp_d*dT + Lv*dqs
# Whiskers on the data plots
ax.plot( (mse_s[idx]+dhs*(-1,1)), p[idx]+dp*(-1,1), linewidth=3, color='r')
ax.plot( (dse[idx] *( 1,1)), p[idx]+dp*(-1,1), linewidth=3, color='k')
ax.plot( (mse[idx] *( 1,1)), p[idx]+dp*(-1,1), linewidth=3, color='b')
# annotation to explain it
if ilev == 400*ilev.units:
ax.plot(360*mse_s.units +dhs*(-1,1)/1000, 440*units('mbar')
+dp*(-1,1), linewidth=3, color='r')
ax.annotate('+/- 10mb', xy=(362,440), fontsize=8)
ax.annotate(' adiabatic displacement', xy=(362,460), fontsize=8)
# Plot a crude histogram of parcel detrainment levels
# Text parts
ax.text(290,pressure[3],'RH (%)',fontsize=11,color='k')
ax.text(285,200,'CAPE = '+str(np.around(CAPE.magnitude,decimals=2))+' [J/kg]',fontsize=12,color='green')
ax.text(285,250,'CIN = '+str(np.around(CIN.magnitude,decimals=2))+' [J/kg]',fontsize=12,color='green')
ax.text(285,300,'LCL = '+str(np.around(lcl_pressure.magnitude,decimals=2))+' [hpa]',fontsize=12,color='darkorange')
ax.text(285,350,'LFC = '+str(np.around(lfc_pressure.magnitude,decimals=2))+' [hpa]',fontsize=12,color='magenta')
ax.text(285,400,'CWV = '+str(np.around(cwv.magnitude,decimals=2))+' [mm]',fontsize=12,color='deepskyblue')
ax.text(285,450,'CRH = '+str(np.around(crh.magnitude,decimals=2))+' [%]',fontsize=12,color='blue')
ax.legend(['DSE','MSE','SMSE'],fontsize=12,loc=1)
ax.set_zorder(3)
return (ax)
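# Usage sketch (illustrative; same hypothetical p_obs/T_obs/q_obs profiles as above):
#
#   >>> ax = msed_plots(p_obs, T_obs, q_obs, entrain=False)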
def add_curves_Wyoming(ax,datetime,station,linewidth=1.0,LH_Tdepend=False):
"""
overlaying new curves of multiple soundings from Wyoming datasets
date: using datetime module. ex. datetime(2018,06,06)
station: station name. ex. 'MFL' Miami, Florida
"""
from siphon.simplewebservice.wyoming import WyomingUpperAir
date = datetime
station = station
df = WyomingUpperAir.request_data(date, station)
pressure = df['pressure'].values
Temp = df['temperature'].values
Temp_dew = df['dewpoint'].values
altitude = df['height'].values
q = mpcalc.mixing_ratio(mpcalc.saturation_vapor_pressure(Temp_dew*units('degC')),pressure*units('mbar'))
q = mpcalc.specific_humidity_from_mixing_ratio(q)
qs = mpcalc.mixing_ratio(mpcalc.saturation_vapor_pressure(Temp*units('degC')),pressure*units('mbar'))
# specific energies
if LH_Tdepend == False:
mse = mpcalc.moist_static_energy(altitude*units('meter'),Temp*units('degC'),q)
mse_s = mpcalc.moist_static_energy(altitude*units('meter'),Temp*units('degC'),qs)
dse = mpcalc.dry_static_energy(altitude*units('meter'),Temp*units('degC'))
else:
# A short course in cloud physics, Roger and Yau (1989)
Lvt = (2500.8 - 2.36*T.magnitude + 0.0016*T.magnitude**2 -
0.00006*T.magnitude**3)*units('joule/gram') # latent heat of evaporation
#Lf = 2834.1 - 0.29*T - 0.004*T**2 # latent heat of fusion
mse = Cp_d*T + g*altitude + Lvt*q
mse_s = Cp_d*T + g*altitude + Lvt*qs
dse = mpcalc.dry_static_energy(altitude,T)
# adding curves on the main axes
ax.plot(dse.magnitude, pressure, 'k', linewidth=linewidth)
ax.plot(mse.magnitude, pressure, 'b', linewidth=linewidth)
ax.plot(mse_s.magnitude, pressure, 'r', linewidth=linewidth)
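# Usage sketch (illustrative; overlays a Wyoming sounding on an existing MSE diagram,
# assuming network access to the Wyoming archive through siphon):
#
#   >>> ax = msed_plots(p_obs, T_obs, q_obs)
#   >>> add_curves_Wyoming(ax, datetime(2018, 6, 6), 'MFL', linewidth=1.0)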
def add_curves(ax,pressure,temperature,mixing_ratio,linewidth=1.0,LH_Tdepend=False):
"""
overlaying new curves of multiple soundings from profiles
"""
p = pressure*units('mbar')
T = temperature*units('degC')
q = mixing_ratio*units('kilogram/kilogram')
qs = mpcalc.mixing_ratio(mpcalc.saturation_vapor_pressure(T),p)
Td = mpcalc.dewpoint(mpcalc.vapor_pressure(p,q)) # dewpoint
Tp = mpcalc.parcel_profile(p,T[0],Td[0]).to('degC') # parcel profile
# Altitude based on the hydrostatic eq.
altitude = np.zeros((np.size(T)))*units('meter') # surface is 0 meter
for i in range(np.size(T)):
altitude[i] = mpcalc.thickness_hydrostatic(p[:i+1],T[:i+1]) # Hypsometric Eq. for height
# specific energies
if LH_Tdepend == False:
mse = mpcalc.moist_static_energy(altitude,T,q)
mse_s = mpcalc.moist_static_energy(altitude,T,qs)
dse = mpcalc.dry_static_energy(altitude,T)
else:
# A short course in cloud physics, Roger and Yau (1989)
Lvt = (2500.8 - 2.36*T.magnitude + 0.0016*T.magnitude**2 -
0.00006*T.magnitude**3)*units('joule/gram') # latent heat of evaporation
#Lf = 2834.1 - 0.29*T - 0.004*T**2 # latent heat of fusion
mse = Cp_d*T + g*altitude + Lvt*q
mse_s = Cp_d*T + g*altitude + Lvt*qs
dse = mpcalc.dry_static_energy(altitude,T)
ax.plot(dse, p, '--k', linewidth=linewidth)
ax.plot(mse, p, '--b', linewidth=linewidth)
ax.plot(mse_s, p, '--r', linewidth=linewidth)
def add_RCEREF(ax,cooling=-1.3,heatflux=116):
### Energy is area, draw reference boxes.
RCEloc = 260
ax.set_xlim([250,390])
ax.plot([RCEloc,RCEloc],[0,1100], linewidth=0.5) ### thin reference line
ax.annotate('daily RCE', xy=(RCEloc,1045), horizontalalignment='center')
#### Radiative cooling reference
ax.fill([RCEloc , RCEloc -1.3, RCEloc -1.3, RCEloc, RCEloc ],
[1000 , 1000 , 200 , 200, 1000],
linewidth=1, color='c', alpha=0.9)
ax.annotate(' cooling'+ str(cooling) + '$K/d$', xy=(RCEloc-5, 300), color='c')
#### Surface flux reference
ax.fill([RCEloc , RCEloc +11, RCEloc +11, RCEloc, RCEloc ],
[1000 , 1000 , 910 , 910, 1000],
linewidth=1, color='orange', alpha=0.5)
ax.annotate(' heat flux', xy=(RCEloc,890), color='orange')
ax.annotate(str(heatflux) + '$W m^{-2}$', xy=(RCEloc,940))
avg_line_length: 42.818182 | max_line_length: 134 | alphanum_fraction: 0.616985

---

hexsha: 2d4b92f41083eb6fbedf6e63838805887c6f72bd | size: 6,884 | ext: py | lang: Python
max_stars_repo: treconomics_project/search/diversify.py @ leifos/treconomics (head 28eeae533cbe92a717b1d552efe57485f301fa37, licenses ["MIT"]) | stars: 1 | 2021-07-20T01:58:22.000Z to 2021-07-20T01:58:22.000Z
max_issues_repo: treconomics_project/search/diversify.py @ leifos/treconomics (head 28eeae533cbe92a717b1d552efe57485f301fa37, licenses ["MIT"]) | issues: null
max_forks_repo: treconomics_project/search/diversify.py @ leifos/treconomics (head 28eeae533cbe92a717b1d552efe57485f301fa37, licenses ["MIT"]) | forks: null
content:
#
# Diversification Algorithm with access to the diversity QRELs
# Mark II -- More complex algorithm, not as rewarding as the first attempt.
# Updated to work with the ifind search objects.
#
# Slightly updated to make it easier to drop into the treconomics environment.
#
# Author: David Maxwell and Leif Azzopardi
# Date: 2018-01-06
#
import copy
from treconomics.experiment_functions import qrels_diversity
# TODO: @leifos
# - What values do we use above?
# - To diversity, you need:
# * a list of results
# * a topic number
# * a lambda value
# * a DIVERSIFY_TO_RANK value
#
# - call diversify_results(results, topic, to_rank, lam)
# This returns a new list, with the diversified set of results according to our algorithm.
# The results object you pass in should be an iterable -- it can be a whoosh.results object or a list.
# The object that is returned is just a Python list -- so there could be an issue down the line if it relies on something whoosh.results provides. Hope not -- I can't create an artificial whoosh.results object (easily, at least).
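# Illustrative call sketch for the notes above (names are hypothetical: `results` is a
# whoosh/ifind results object already obtained for topic 347, and the rank depth and
# lambda come from whatever the experiment configuration supplies):
#
#   >>> diversified = diversify_results(results, topic=347, to_rank=30, lam=1.0)
#   >>> top_docids = [hit.docid for hit in diversified.results[:10]]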
def convert_results_to_list(results, deep_copy=True):
"""
Given a Whoosh results object, converts it to a list and returns that list.
Useful, as the Whoosh results object does not permit reassignment of Hit objects.
Note that if deep_copy is True, a deep copy of the list is returned.
"""
results_list = []
for hit in results:
if deep_copy:
results_list.append(copy.copy(hit))
continue
results_list.append(hit)
return results_list
def get_highest_score_index(results_list):
"""
Given a list of results, returns the index of the hit with the highest score.
A simple find-the-maximum scan over the hits.
"""
highest_score = 0.0
highest_index = 0
index = 0
for hit in results_list:
if hit.score > highest_score:
highest_score = hit.score
highest_index = index
index = index + 1
return highest_index
def get_new_entities(observed_entities, document_entities):
"""
Given a list of previously seen entities, and a list of document entities, returns
a list of entities in the document which have not yet been previously seen.
"""
return list(set(document_entities) - set(observed_entities))
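# Example (illustrative; the order of the returned list is not guaranteed, since sets are used):
#
#   >>> get_new_entities(['ent_a', 'ent_b'], ['ent_b', 'ent_c'])
#   ['ent_c']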
# def get_existing_entities(observed_entities, document_entities):
# """
# Given a list of previously seen entities, and a list of document entities, returns
# the intersection of the two lists -- i.e. the entities that have already been seen.
# """
# return list(set(observed_entities) & set(document_entities))
def get_observed_entities_for_list(topic, rankings_list):
"""
Given a list of Whoosh Hit objects, returns a list of the different entities that are mentioned in them.
"""
observed_entities = []
for hit in rankings_list:
docid = hit.docid
entities = qrels_diversity.get_mentioned_entities_for_doc(topic, docid)
new_entities = get_new_entities(observed_entities, entities)
observed_entities = observed_entities + new_entities
return observed_entities
def diversify_results(results, topic, to_rank=30, lam=1.0):
"""
The diversification algorithm.
Given a ifind results object, returns a re-ranked list, with more diverse content at the top.
By diverse, we mean a selection of documents discussing a wider range of identified entities.
"""
results_len = len(results.results)
#results_len = results.scored_length() # Doing len(results) returns the number of hits, not the top k.
#print(results)
# Simple sanity check -- no results? Can't diversify anything!
if results_len == 0:
return results
# Before diversifying, check -- are there enough results to go to to_rank?
# If not, change to_rank to the length of the results we have.
if to_rank is None:
to_rank = results_len
# Not enough results to get to to_rank? Change the to_rank cap to the results length.
if results_len < to_rank:
to_rank = results_len
# Check that lambda is a float in case of floating point calculations...
if type(lam) != float:
lam = float(lam)
############################
### Main algorithm below ###
############################
observed_entities = [] # What entities have been previously seen? This list holds them.
# As the list of results is probably larger than the depth we re-rank to, take a slice.
# This is our original list of results that we'll be modifying and popping from.
old_rankings = results.results[:to_rank]
# For our new rankings, start with the first document -- this won't change.
# This list will be populated as we iterate through the other rankings list.
new_rankings = [old_rankings.pop(0)]
for i in range(1, to_rank):
observed_entities = get_observed_entities_for_list(topic, new_rankings)
for j in range(0, len(old_rankings)):
docid = old_rankings[j].docid
entities = qrels_diversity.get_mentioned_entities_for_doc(topic, docid)
new_entities = get_new_entities(observed_entities, entities)
#seen_entities = get_existing_entities(qrels_diversity, observed_entities, entities)
old_rankings[j].score = old_rankings[j].score + (lam * len(new_entities))
# Sort the list in reverse order, so the highest score is first. Then pop from old, push to new.
old_rankings.sort(key=lambda x: x.score, reverse=True)
new_rankings.append(old_rankings.pop(0))
results.results = new_rankings + results.results[to_rank:]
return results
# The main algorithm -- only work on the top to_rank documents.
# Leif notes (algorithm): two loops still...
# for first doc, get the mentioned entities. this is outside the loop (set to x).
# for each doc in the rest of the list, what entities are in those docs, and how different are they?
# compute score, sort it, take the first element from that, that becomes the second document
# the key is to sort the sublist scores, and pick the top element from that list.
# take entities from the first two now - replace x with this.
# repeat this until all documents have been observed.
#
# how does lambda influence the performance?
# alpha ndcg -- run the queries from the sigir study from before to see what happens when you change lambda.
# More:
# take all documents that have been judged
# take the non-rel documents in the QRELs for the
# so you update the list of entities with those previously seen (i.e. x) after each document has been observed.
avg_line_length: 39.337143 | max_line_length: 230 | alphanum_fraction: 0.68405

---

hexsha: 073918eda3deb30a7b61a402d8bd20ec3edeedec | size: 38,925 | ext: py | lang: Python
max_stars_repo: pandas/tests/groupby/aggregate/test_aggregate.py @ mavismonica/pandas (head dbdc55c9d59f25589d58cc60247af193f06c3c66, licenses ["BSD-3-Clause"]) | stars: 6 | 2019-12-21T21:15:54.000Z to 2021-04-20T17:35:24.000Z
max_issues_repo: pandas/tests/groupby/aggregate/test_aggregate.py @ mavismonica/pandas (head dbdc55c9d59f25589d58cc60247af193f06c3c66, licenses ["BSD-3-Clause"]) | issues: 2 | 2021-04-06T18:35:49.000Z to 2021-06-02T04:09:53.000Z
max_forks_repo: pandas/tests/groupby/aggregate/test_aggregate.py @ mavismonica/pandas (head dbdc55c9d59f25589d58cc60247af193f06c3c66, licenses ["BSD-3-Clause"]) | forks: 3 | 2021-01-31T16:40:52.000Z to 2021-08-29T18:32:34.000Z
content:
"""
test .agg behavior / note that .apply is tested generally in test_groupby.py
"""
import datetime
import functools
from functools import partial
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, concat
import pandas._testing as tm
from pandas.core.base import SpecificationError
from pandas.core.groupby.grouper import Grouping
def test_groupby_agg_no_extra_calls():
# GH#31760
df = DataFrame({"key": ["a", "b", "c", "c"], "value": [1, 2, 3, 4]})
gb = df.groupby("key")["value"]
def dummy_func(x):
assert len(x) != 0
return x.sum()
gb.agg(dummy_func)
def test_agg_regression1(tsframe):
grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_agg_must_agg(df):
grouped = df.groupby("A")["C"]
msg = "Must produce aggregated value"
with pytest.raises(Exception, match=msg):
grouped.agg(lambda x: x.describe())
with pytest.raises(Exception, match=msg):
grouped.agg(lambda x: x.index[:2])
def test_agg_ser_multi_key(df):
# TODO(wesm): unused
ser = df.C # noqa
f = lambda x: x.sum()
results = df.C.groupby([df.A, df.B]).aggregate(f)
expected = df.groupby(["A", "B"]).sum()["C"]
tm.assert_series_equal(results, expected)
def test_groupby_aggregation_mixed_dtype():
# GH 6212
expected = DataFrame(
{
"v1": [5, 5, 7, np.nan, 3, 3, 4, 1],
"v2": [55, 55, 77, np.nan, 33, 33, 44, 11],
},
index=MultiIndex.from_tuples(
[
(1, 95),
(1, 99),
(2, 95),
(2, 99),
("big", "damp"),
("blue", "dry"),
("red", "red"),
("red", "wet"),
],
names=["by1", "by2"],
),
)
df = DataFrame(
{
"v1": [1, 3, 5, 7, 8, 3, 5, np.nan, 4, 5, 7, 9],
"v2": [11, 33, 55, 77, 88, 33, 55, np.nan, 44, 55, 77, 99],
"by1": ["red", "blue", 1, 2, np.nan, "big", 1, 2, "red", 1, np.nan, 12],
"by2": [
"wet",
"dry",
99,
95,
np.nan,
"damp",
95,
99,
"red",
99,
np.nan,
np.nan,
],
}
)
g = df.groupby(["by1", "by2"])
result = g[["v1", "v2"]].mean()
tm.assert_frame_equal(result, expected)
def test_groupby_aggregation_multi_level_column():
# GH 29772
lst = [
[True, True, True, False],
[True, False, np.nan, False],
[True, True, np.nan, False],
[True, True, np.nan, False],
]
df = DataFrame(
data=lst,
columns=pd.MultiIndex.from_tuples([("A", 0), ("A", 1), ("B", 0), ("B", 1)]),
)
result = df.groupby(level=1, axis=1).sum()
expected = DataFrame({0: [2.0, 1, 1, 1], 1: [1, 0, 1, 1]})
tm.assert_frame_equal(result, expected)
def test_agg_apply_corner(ts, tsframe):
# nothing to group, all NA
grouped = ts.groupby(ts * np.nan)
assert ts.dtype == np.float64
# groupby float64 values results in Float64Index
exp = Series([], dtype=np.float64, index=Index([], dtype=np.float64))
tm.assert_series_equal(grouped.sum(), exp)
tm.assert_series_equal(grouped.agg(np.sum), exp)
tm.assert_series_equal(grouped.apply(np.sum), exp, check_index_type=False)
# DataFrame
grouped = tsframe.groupby(tsframe["A"] * np.nan)
exp_df = DataFrame(
columns=tsframe.columns, dtype=float, index=Index([], dtype=np.float64)
)
tm.assert_frame_equal(grouped.sum(), exp_df, check_names=False)
tm.assert_frame_equal(grouped.agg(np.sum), exp_df, check_names=False)
tm.assert_frame_equal(grouped.apply(np.sum), exp_df.iloc[:, :0], check_names=False)
def test_agg_grouping_is_list_tuple(ts):
df = tm.makeTimeDataFrame()
grouped = df.groupby(lambda x: x.year)
grouper = grouped.grouper.groupings[0].grouper
grouped.grouper.groupings[0] = Grouping(ts.index, list(grouper))
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
grouped.grouper.groupings[0] = Grouping(ts.index, tuple(grouper))
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_agg_python_multiindex(mframe):
grouped = mframe.groupby(["A", "B"])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"groupbyfunc", [lambda x: x.weekday(), [lambda x: x.month, lambda x: x.weekday()]]
)
def test_aggregate_str_func(tsframe, groupbyfunc):
grouped = tsframe.groupby(groupbyfunc)
# single series
result = grouped["A"].agg("std")
expected = grouped["A"].std()
tm.assert_series_equal(result, expected)
# group frame by function name
result = grouped.aggregate("var")
expected = grouped.var()
tm.assert_frame_equal(result, expected)
# group frame by function dict
result = grouped.agg({"A": "var", "B": "std", "C": "mean", "D": "sem"})
expected = DataFrame(
{
"A": grouped["A"].var(),
"B": grouped["B"].std(),
"C": grouped["C"].mean(),
"D": grouped["D"].sem(),
}
)
tm.assert_frame_equal(result, expected)
def test_aggregate_item_by_item(df):
grouped = df.groupby("A")
aggfun = lambda ser: ser.size
result = grouped.agg(aggfun)
foo = (df.A == "foo").sum()
bar = (df.A == "bar").sum()
K = len(result.columns)
# GH5782
# odd comparisons can result here, so cast to make easy
exp = Series(np.array([foo] * K), index=list("BCD"), dtype=np.float64, name="foo")
tm.assert_series_equal(result.xs("foo"), exp)
exp = Series(np.array([bar] * K), index=list("BCD"), dtype=np.float64, name="bar")
tm.assert_almost_equal(result.xs("bar"), exp)
def aggfun(ser):
return ser.size
result = DataFrame().groupby(df.A).agg(aggfun)
assert isinstance(result, DataFrame)
assert len(result) == 0
def test_wrap_agg_out(three_group):
grouped = three_group.groupby(["A", "B"])
def func(ser):
if ser.dtype == object:
raise TypeError
else:
return ser.sum()
result = grouped.aggregate(func)
exp_grouped = three_group.loc[:, three_group.columns != "C"]
expected = exp_grouped.groupby(["A", "B"]).aggregate(func)
tm.assert_frame_equal(result, expected)
def test_agg_multiple_functions_maintain_order(df):
# GH #610
funcs = [("mean", np.mean), ("max", np.max), ("min", np.min)]
result = df.groupby("A")["C"].agg(funcs)
exp_cols = Index(["mean", "max", "min"])
tm.assert_index_equal(result.columns, exp_cols)
def test_agg_multiple_functions_same_name():
# GH 30880
df = DataFrame(
np.random.randn(1000, 3),
index=pd.date_range("1/1/2012", freq="S", periods=1000),
columns=["A", "B", "C"],
)
result = df.resample("3T").agg(
{"A": [partial(np.quantile, q=0.9999), partial(np.quantile, q=0.1111)]}
)
expected_index = pd.date_range("1/1/2012", freq="3T", periods=6)
expected_columns = MultiIndex.from_tuples([("A", "quantile"), ("A", "quantile")])
expected_values = np.array(
[df.resample("3T").A.quantile(q=q).values for q in [0.9999, 0.1111]]
).T
expected = DataFrame(
expected_values, columns=expected_columns, index=expected_index
)
tm.assert_frame_equal(result, expected)
def test_agg_multiple_functions_same_name_with_ohlc_present():
# GH 30880
# ohlc expands dimensions, so different test to the above is required.
df = DataFrame(
np.random.randn(1000, 3),
index=pd.date_range("1/1/2012", freq="S", periods=1000),
columns=["A", "B", "C"],
)
result = df.resample("3T").agg(
{"A": ["ohlc", partial(np.quantile, q=0.9999), partial(np.quantile, q=0.1111)]}
)
expected_index = pd.date_range("1/1/2012", freq="3T", periods=6)
expected_columns = pd.MultiIndex.from_tuples(
[
("A", "ohlc", "open"),
("A", "ohlc", "high"),
("A", "ohlc", "low"),
("A", "ohlc", "close"),
("A", "quantile", "A"),
("A", "quantile", "A"),
]
)
non_ohlc_expected_values = np.array(
[df.resample("3T").A.quantile(q=q).values for q in [0.9999, 0.1111]]
).T
expected_values = np.hstack([df.resample("3T").A.ohlc(), non_ohlc_expected_values])
expected = DataFrame(
expected_values, columns=expected_columns, index=expected_index
)
# PerformanceWarning is thrown by `assert col in right` in assert_frame_equal
with tm.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(result, expected)
def test_multiple_functions_tuples_and_non_tuples(df):
# #1359
funcs = [("foo", "mean"), "std"]
ex_funcs = [("foo", "mean"), ("std", "std")]
result = df.groupby("A")["C"].agg(funcs)
expected = df.groupby("A")["C"].agg(ex_funcs)
tm.assert_frame_equal(result, expected)
result = df.groupby("A").agg(funcs)
expected = df.groupby("A").agg(ex_funcs)
tm.assert_frame_equal(result, expected)
def test_more_flexible_frame_multi_function(df):
grouped = df.groupby("A")
exmean = grouped.agg({"C": np.mean, "D": np.mean})
exstd = grouped.agg({"C": np.std, "D": np.std})
expected = concat([exmean, exstd], keys=["mean", "std"], axis=1)
expected = expected.swaplevel(0, 1, axis=1).sort_index(level=0, axis=1)
d = {"C": [np.mean, np.std], "D": [np.mean, np.std]}
result = grouped.aggregate(d)
tm.assert_frame_equal(result, expected)
# be careful
result = grouped.aggregate({"C": np.mean, "D": [np.mean, np.std]})
expected = grouped.aggregate({"C": np.mean, "D": [np.mean, np.std]})
tm.assert_frame_equal(result, expected)
def foo(x):
return np.mean(x)
def bar(x):
return np.std(x, ddof=1)
# this uses column selection & renaming
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
d = {"C": np.mean, "D": {"foo": np.mean, "bar": np.std}}
grouped.aggregate(d)
# But without renaming, these functions are OK
d = {"C": [np.mean], "D": [foo, bar]}
grouped.aggregate(d)
def test_multi_function_flexible_mix(df):
# GH #1268
grouped = df.groupby("A")
# Expected
d = {"C": {"foo": "mean", "bar": "std"}, "D": {"sum": "sum"}}
# this uses column selection & renaming
msg = r"nested renamer is not supported"
with pytest.raises(SpecificationError, match=msg):
grouped.aggregate(d)
# Test 1
d = {"C": {"foo": "mean", "bar": "std"}, "D": "sum"}
# this uses column selection & renaming
with pytest.raises(SpecificationError, match=msg):
grouped.aggregate(d)
# Test 2
d = {"C": {"foo": "mean", "bar": "std"}, "D": "sum"}
# this uses column selection & renaming
with pytest.raises(SpecificationError, match=msg):
grouped.aggregate(d)
def test_groupby_agg_coercing_bools():
# issue 14873
dat = DataFrame({"a": [1, 1, 2, 2], "b": [0, 1, 2, 3], "c": [None, None, 1, 1]})
gp = dat.groupby("a")
index = Index([1, 2], name="a")
result = gp["b"].aggregate(lambda x: (x != 0).all())
expected = Series([False, True], index=index, name="b")
tm.assert_series_equal(result, expected)
result = gp["c"].aggregate(lambda x: x.isnull().all())
expected = Series([True, False], index=index, name="c")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"op",
[
lambda x: x.sum(),
lambda x: x.cumsum(),
lambda x: x.transform("sum"),
lambda x: x.transform("cumsum"),
lambda x: x.agg("sum"),
lambda x: x.agg("cumsum"),
],
)
def test_bool_agg_dtype(op):
# GH 7001
# Bool sum aggregations result in int
df = DataFrame({"a": [1, 1], "b": [False, True]})
s = df.set_index("a")["b"]
result = op(df.groupby("a"))["b"].dtype
assert is_integer_dtype(result)
result = op(s.groupby("a")).dtype
assert is_integer_dtype(result)
def test_order_aggregate_multiple_funcs():
# GH 25692
df = DataFrame({"A": [1, 1, 2, 2], "B": [1, 2, 3, 4]})
res = df.groupby("A").agg(["sum", "max", "mean", "ohlc", "min"])
result = res.columns.levels[1]
expected = Index(["sum", "max", "mean", "ohlc", "min"])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dtype", [np.int64, np.uint64])
@pytest.mark.parametrize("how", ["first", "last", "min", "max", "mean", "median"])
def test_uint64_type_handling(dtype, how):
# GH 26310
df = DataFrame({"x": 6903052872240755750, "y": [1, 2]})
expected = df.groupby("y").agg({"x": how})
df.x = df.x.astype(dtype)
result = df.groupby("y").agg({"x": how})
result.x = result.x.astype(np.int64)
tm.assert_frame_equal(result, expected, check_exact=True)
def test_func_duplicates_raises():
# GH28426
msg = "Function names"
df = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})
with pytest.raises(SpecificationError, match=msg):
df.groupby("A").agg(["min", "min"])
@pytest.mark.parametrize(
"index",
[
pd.CategoricalIndex(list("abc")),
pd.interval_range(0, 3),
pd.period_range("2020", periods=3, freq="D"),
pd.MultiIndex.from_tuples([("a", 0), ("a", 1), ("b", 0)]),
],
)
def test_agg_index_has_complex_internals(index):
# GH 31223
df = DataFrame({"group": [1, 1, 2], "value": [0, 1, 0]}, index=index)
result = df.groupby("group").agg({"value": Series.nunique})
expected = DataFrame({"group": [1, 2], "value": [2, 1]}).set_index("group")
tm.assert_frame_equal(result, expected)
def test_agg_split_block():
# https://github.com/pandas-dev/pandas/issues/31522
df = DataFrame(
{
"key1": ["a", "a", "b", "b", "a"],
"key2": ["one", "two", "one", "two", "one"],
"key3": ["three", "three", "three", "six", "six"],
}
)
result = df.groupby("key1").min()
expected = DataFrame(
{"key2": ["one", "one"], "key3": ["six", "six"]},
index=Index(["a", "b"], name="key1"),
)
tm.assert_frame_equal(result, expected)
def test_agg_split_object_part_datetime():
# https://github.com/pandas-dev/pandas/pull/31616
df = DataFrame(
{
"A": pd.date_range("2000", periods=4),
"B": ["a", "b", "c", "d"],
"C": [1, 2, 3, 4],
"D": ["b", "c", "d", "e"],
"E": pd.date_range("2000", periods=4),
"F": [1, 2, 3, 4],
}
).astype(object)
result = df.groupby([0, 0, 0, 0]).min()
expected = DataFrame(
{
"A": [pd.Timestamp("2000")],
"B": ["a"],
"C": [1],
"D": ["b"],
"E": [pd.Timestamp("2000")],
"F": [1],
}
)
tm.assert_frame_equal(result, expected)
class TestNamedAggregationSeries:
def test_series_named_agg(self):
df = Series([1, 2, 3, 4])
gr = df.groupby([0, 0, 1, 1])
result = gr.agg(a="sum", b="min")
expected = DataFrame(
{"a": [3, 7], "b": [1, 3]}, columns=["a", "b"], index=[0, 1]
)
tm.assert_frame_equal(result, expected)
result = gr.agg(b="min", a="sum")
expected = expected[["b", "a"]]
tm.assert_frame_equal(result, expected)
def test_no_args_raises(self):
gr = Series([1, 2]).groupby([0, 1])
with pytest.raises(TypeError, match="Must provide"):
gr.agg()
# but we do allow this
result = gr.agg([])
expected = DataFrame()
tm.assert_frame_equal(result, expected)
def test_series_named_agg_duplicates_no_raises(self):
# GH28426
gr = Series([1, 2, 3]).groupby([0, 0, 1])
grouped = gr.agg(a="sum", b="sum")
expected = DataFrame({"a": [3, 3], "b": [3, 3]})
tm.assert_frame_equal(expected, grouped)
def test_mangled(self):
gr = Series([1, 2, 3]).groupby([0, 0, 1])
result = gr.agg(a=lambda x: 0, b=lambda x: 1)
expected = DataFrame({"a": [0, 0], "b": [1, 1]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"inp",
[
pd.NamedAgg(column="anything", aggfunc="min"),
("anything", "min"),
["anything", "min"],
],
)
def test_named_agg_nametuple(self, inp):
# GH34422
s = Series([1, 1, 2, 2, 3, 3, 4, 5])
msg = f"func is expected but received {type(inp).__name__}"
with pytest.raises(TypeError, match=msg):
s.groupby(s.values).agg(a=inp)
class TestNamedAggregationDataFrame:
def test_agg_relabel(self):
df = DataFrame(
{"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
)
result = df.groupby("group").agg(a_max=("A", "max"), b_max=("B", "max"))
expected = DataFrame(
{"a_max": [1, 3], "b_max": [6, 8]},
index=Index(["a", "b"], name="group"),
columns=["a_max", "b_max"],
)
tm.assert_frame_equal(result, expected)
# order invariance
p98 = functools.partial(np.percentile, q=98)
result = df.groupby("group").agg(
b_min=("B", "min"),
a_min=("A", min),
a_mean=("A", np.mean),
a_max=("A", "max"),
b_max=("B", "max"),
a_98=("A", p98),
)
expected = DataFrame(
{
"b_min": [5, 7],
"a_min": [0, 2],
"a_mean": [0.5, 2.5],
"a_max": [1, 3],
"b_max": [6, 8],
"a_98": [0.98, 2.98],
},
index=Index(["a", "b"], name="group"),
columns=["b_min", "a_min", "a_mean", "a_max", "b_max", "a_98"],
)
tm.assert_frame_equal(result, expected)
def test_agg_relabel_non_identifier(self):
df = DataFrame(
{"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
)
result = df.groupby("group").agg(**{"my col": ("A", "max")})
expected = DataFrame({"my col": [1, 3]}, index=Index(["a", "b"], name="group"))
tm.assert_frame_equal(result, expected)
def test_duplicate_no_raises(self):
# GH 28426, if use same input function on same column,
# no error should raise
df = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})
grouped = df.groupby("A").agg(a=("B", "min"), b=("B", "min"))
expected = DataFrame({"a": [1, 3], "b": [1, 3]}, index=Index([0, 1], name="A"))
tm.assert_frame_equal(grouped, expected)
quant50 = functools.partial(np.percentile, q=50)
quant70 = functools.partial(np.percentile, q=70)
quant50.__name__ = "quant50"
quant70.__name__ = "quant70"
test = DataFrame({"col1": ["a", "a", "b", "b", "b"], "col2": [1, 2, 3, 4, 5]})
grouped = test.groupby("col1").agg(
quantile_50=("col2", quant50), quantile_70=("col2", quant70)
)
expected = DataFrame(
{"quantile_50": [1.5, 4.0], "quantile_70": [1.7, 4.4]},
index=Index(["a", "b"], name="col1"),
)
tm.assert_frame_equal(grouped, expected)
def test_agg_relabel_with_level(self):
df = DataFrame(
{"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]},
index=pd.MultiIndex.from_product([["A", "B"], ["a", "b"]]),
)
result = df.groupby(level=0).agg(
aa=("A", "max"), bb=("A", "min"), cc=("B", "mean")
)
expected = DataFrame(
{"aa": [0, 1], "bb": [0, 1], "cc": [1.5, 3.5]}, index=["A", "B"]
)
tm.assert_frame_equal(result, expected)
def test_agg_relabel_other_raises(self):
df = DataFrame({"A": [0, 0, 1], "B": [1, 2, 3]})
grouped = df.groupby("A")
match = "Must provide"
with pytest.raises(TypeError, match=match):
grouped.agg(foo=1)
with pytest.raises(TypeError, match=match):
grouped.agg()
with pytest.raises(TypeError, match=match):
grouped.agg(a=("B", "max"), b=(1, 2, 3))
def test_missing_raises(self):
df = DataFrame({"A": [0, 1], "B": [1, 2]})
with pytest.raises(KeyError, match="Column 'C' does not exist"):
df.groupby("A").agg(c=("C", "sum"))
def test_agg_namedtuple(self):
df = DataFrame({"A": [0, 1], "B": [1, 2]})
result = df.groupby("A").agg(
b=pd.NamedAgg("B", "sum"), c=pd.NamedAgg(column="B", aggfunc="count")
)
expected = df.groupby("A").agg(b=("B", "sum"), c=("B", "count"))
tm.assert_frame_equal(result, expected)
def test_mangled(self):
df = DataFrame({"A": [0, 1], "B": [1, 2], "C": [3, 4]})
result = df.groupby("A").agg(b=("B", lambda x: 0), c=("C", lambda x: 1))
expected = DataFrame({"b": [0, 0], "c": [1, 1]}, index=Index([0, 1], name="A"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"agg_col1, agg_col2, agg_col3, agg_result1, agg_result2, agg_result3",
[
(
(("y", "A"), "max"),
(("y", "A"), np.min),
(("y", "B"), "mean"),
[1, 3],
[0, 2],
[5.5, 7.5],
),
(
(("y", "A"), lambda x: max(x)),
(("y", "A"), lambda x: 1),
(("y", "B"), "mean"),
[1, 3],
[1, 1],
[5.5, 7.5],
),
(
pd.NamedAgg(("y", "A"), "max"),
pd.NamedAgg(("y", "B"), np.mean),
pd.NamedAgg(("y", "A"), lambda x: 1),
[1, 3],
[5.5, 7.5],
[1, 1],
),
],
)
def test_agg_relabel_multiindex_column(
agg_col1, agg_col2, agg_col3, agg_result1, agg_result2, agg_result3
):
# GH 29422, add tests for multiindex column cases
df = DataFrame(
{"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
)
df.columns = pd.MultiIndex.from_tuples([("x", "group"), ("y", "A"), ("y", "B")])
idx = Index(["a", "b"], name=("x", "group"))
result = df.groupby(("x", "group")).agg(a_max=(("y", "A"), "max"))
expected = DataFrame({"a_max": [1, 3]}, index=idx)
tm.assert_frame_equal(result, expected)
result = df.groupby(("x", "group")).agg(
col_1=agg_col1, col_2=agg_col2, col_3=agg_col3
)
expected = DataFrame(
{"col_1": agg_result1, "col_2": agg_result2, "col_3": agg_result3}, index=idx
)
tm.assert_frame_equal(result, expected)
def test_agg_relabel_multiindex_raises_not_exist():
# GH 29422, add test for the raise scenario when the aggregate column does not exist
df = DataFrame(
{"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
)
df.columns = pd.MultiIndex.from_tuples([("x", "group"), ("y", "A"), ("y", "B")])
with pytest.raises(KeyError, match="does not exist"):
df.groupby(("x", "group")).agg(a=(("Y", "a"), "max"))
def test_agg_relabel_multiindex_duplicates():
# GH 29422, add test for the raise scenario when getting duplicates
# GH28426, after this change, duplicates should also work if the relabelling is
# different
df = DataFrame(
{"group": ["a", "a", "b", "b"], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}
)
df.columns = pd.MultiIndex.from_tuples([("x", "group"), ("y", "A"), ("y", "B")])
result = df.groupby(("x", "group")).agg(
a=(("y", "A"), "min"), b=(("y", "A"), "min")
)
idx = Index(["a", "b"], name=("x", "group"))
expected = DataFrame({"a": [0, 2], "b": [0, 2]}, index=idx)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [{"c": ["min"]}, {"b": [], "c": ["min"]}])
def test_groupby_aggregate_empty_key(kwargs):
# GH: 32580
df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 3], "c": [1, 2, 4]})
result = df.groupby("a").agg(kwargs)
expected = DataFrame(
[1, 4],
index=Index([1, 2], dtype="int64", name="a"),
columns=pd.MultiIndex.from_tuples([["c", "min"]]),
)
tm.assert_frame_equal(result, expected)
def test_groupby_aggregate_empty_key_empty_return():
# GH: 32580 Check if everything works, when return is empty
df = DataFrame({"a": [1, 1, 2], "b": [1, 2, 3], "c": [1, 2, 4]})
result = df.groupby("a").agg({"b": []})
expected = DataFrame(columns=pd.MultiIndex(levels=[["b"], []], codes=[[], []]))
tm.assert_frame_equal(result, expected)
def test_groupby_agg_loses_results_with_as_index_false_relabel():
# GH 32240: When the aggregate function relabels column names and
# as_index=False is specified, the results are dropped.
df = DataFrame(
{"key": ["x", "y", "z", "x", "y", "z"], "val": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75]}
)
grouped = df.groupby("key", as_index=False)
result = grouped.agg(min_val=pd.NamedAgg(column="val", aggfunc="min"))
expected = DataFrame({"key": ["x", "y", "z"], "min_val": [1.0, 0.8, 0.75]})
tm.assert_frame_equal(result, expected)
def test_groupby_agg_loses_results_with_as_index_false_relabel_multiindex():
# GH 32240: When the aggregate function relabels column names and
# as_index=False is specified, the results are dropped. Check if
# multiindex is returned in the right order
df = DataFrame(
{
"key": ["x", "y", "x", "y", "x", "x"],
"key1": ["a", "b", "c", "b", "a", "c"],
"val": [1.0, 0.8, 2.0, 3.0, 3.6, 0.75],
}
)
grouped = df.groupby(["key", "key1"], as_index=False)
result = grouped.agg(min_val=pd.NamedAgg(column="val", aggfunc="min"))
expected = DataFrame(
{"key": ["x", "x", "y"], "key1": ["a", "c", "b"], "min_val": [1.0, 0.75, 0.8]}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"func", [lambda s: s.mean(), lambda s: np.mean(s), lambda s: np.nanmean(s)]
)
def test_multiindex_custom_func(func):
# GH 31777
data = [[1, 4, 2], [5, 7, 1]]
df = DataFrame(data, columns=pd.MultiIndex.from_arrays([[1, 1, 2], [3, 4, 3]]))
result = df.groupby(np.array([0, 1])).agg(func)
expected_dict = {(1, 3): {0: 1, 1: 5}, (1, 4): {0: 4, 1: 7}, (2, 3): {0: 2, 1: 1}}
expected = DataFrame(expected_dict)
tm.assert_frame_equal(result, expected)
def myfunc(s):
return np.percentile(s, q=0.90)
@pytest.mark.parametrize("func", [lambda s: np.percentile(s, q=0.90), myfunc])
def test_lambda_named_agg(func):
# see gh-28467
animals = DataFrame(
{
"kind": ["cat", "dog", "cat", "dog"],
"height": [9.1, 6.0, 9.5, 34.0],
"weight": [7.9, 7.5, 9.9, 198.0],
}
)
result = animals.groupby("kind").agg(
mean_height=("height", "mean"), perc90=("height", func)
)
expected = DataFrame(
[[9.3, 9.1036], [20.0, 6.252]],
columns=["mean_height", "perc90"],
index=Index(["cat", "dog"], name="kind"),
)
tm.assert_frame_equal(result, expected)
def test_aggregate_mixed_types():
# GH 16916
df = DataFrame(
data=np.array([0] * 9).reshape(3, 3), columns=list("XYZ"), index=list("abc")
)
df["grouping"] = ["group 1", "group 1", 2]
result = df.groupby("grouping").aggregate(lambda x: x.tolist())
expected_data = [[[0], [0], [0]], [[0, 0], [0, 0], [0, 0]]]
expected = DataFrame(
expected_data,
index=Index([2, "group 1"], dtype="object", name="grouping"),
columns=Index(["X", "Y", "Z"], dtype="object"),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason="Not implemented;see GH 31256")
def test_aggregate_udf_na_extension_type():
# https://github.com/pandas-dev/pandas/pull/31359
# This is currently failing to cast back to Int64Dtype.
# The presence of the NA causes two problems
# 1. NA is not an instance of Int64Dtype.type (numpy.int64)
# 2. The presence of an NA forces object type, so the non-NA value is
# a Python int rather than a NumPy int64. Python ints aren't
# instances of numpy.int64.
def aggfunc(x):
if all(x > 2):
return 1
else:
return pd.NA
df = DataFrame({"A": pd.array([1, 2, 3])})
result = df.groupby([1, 1, 2]).agg(aggfunc)
expected = DataFrame({"A": pd.array([1, pd.NA], dtype="Int64")}, index=[1, 2])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("func", ["min", "max"])
def test_groupby_aggregate_period_column(func):
# GH 31471
groups = [1, 2]
periods = pd.period_range("2020", periods=2, freq="Y")
df = DataFrame({"a": groups, "b": periods})
result = getattr(df.groupby("a")["b"], func)()
idx = pd.Int64Index([1, 2], name="a")
expected = Series(periods, index=idx, name="b")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("func", ["min", "max"])
def test_groupby_aggregate_period_frame(func):
# GH 31471
groups = [1, 2]
periods = pd.period_range("2020", periods=2, freq="Y")
df = DataFrame({"a": groups, "b": periods})
result = getattr(df.groupby("a"), func)()
idx = pd.Int64Index([1, 2], name="a")
expected = DataFrame({"b": periods}, index=idx)
tm.assert_frame_equal(result, expected)
class TestLambdaMangling:
def test_basic(self):
df = DataFrame({"A": [0, 0, 1, 1], "B": [1, 2, 3, 4]})
result = df.groupby("A").agg({"B": [lambda x: 0, lambda x: 1]})
expected = DataFrame(
{("B", "<lambda_0>"): [0, 0], ("B", "<lambda_1>"): [1, 1]},
index=Index([0, 1], name="A"),
)
tm.assert_frame_equal(result, expected)
def test_mangle_series_groupby(self):
gr = Series([1, 2, 3, 4]).groupby([0, 0, 1, 1])
result = gr.agg([lambda x: 0, lambda x: 1])
expected = DataFrame({"<lambda_0>": [0, 0], "<lambda_1>": [1, 1]})
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason="GH-26611. kwargs for multi-agg.")
def test_with_kwargs(self):
f1 = lambda x, y, b=1: x.sum() + y + b
f2 = lambda x, y, b=2: x.sum() + y * b
result = Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0)
expected = DataFrame({"<lambda_0>": [4], "<lambda_1>": [6]})
tm.assert_frame_equal(result, expected)
result = Series([1, 2]).groupby([0, 0]).agg([f1, f2], 0, b=10)
expected = DataFrame({"<lambda_0>": [13], "<lambda_1>": [30]})
tm.assert_frame_equal(result, expected)
def test_agg_with_one_lambda(self):
# GH 25719, write tests for DataFrameGroupBy.agg with only one lambda
df = DataFrame(
{
"kind": ["cat", "dog", "cat", "dog"],
"height": [9.1, 6.0, 9.5, 34.0],
"weight": [7.9, 7.5, 9.9, 198.0],
}
)
columns = ["height_sqr_min", "height_max", "weight_max"]
expected = DataFrame(
{
"height_sqr_min": [82.81, 36.00],
"height_max": [9.5, 34.0],
"weight_max": [9.9, 198.0],
},
index=Index(["cat", "dog"], name="kind"),
columns=columns,
)
# check pd.NamedAgg case
result1 = df.groupby(by="kind").agg(
height_sqr_min=pd.NamedAgg(
column="height", aggfunc=lambda x: np.min(x ** 2)
),
height_max=pd.NamedAgg(column="height", aggfunc="max"),
weight_max=pd.NamedAgg(column="weight", aggfunc="max"),
)
tm.assert_frame_equal(result1, expected)
# check agg(key=(col, aggfunc)) case
result2 = df.groupby(by="kind").agg(
height_sqr_min=("height", lambda x: np.min(x ** 2)),
height_max=("height", "max"),
weight_max=("weight", "max"),
)
tm.assert_frame_equal(result2, expected)
def test_agg_multiple_lambda(self):
# GH 25719, test for DataFrameGroupBy.agg with multiple lambdas
# with mixed aggfunc
df = DataFrame(
{
"kind": ["cat", "dog", "cat", "dog"],
"height": [9.1, 6.0, 9.5, 34.0],
"weight": [7.9, 7.5, 9.9, 198.0],
}
)
columns = [
"height_sqr_min",
"height_max",
"weight_max",
"height_max_2",
"weight_min",
]
expected = DataFrame(
{
"height_sqr_min": [82.81, 36.00],
"height_max": [9.5, 34.0],
"weight_max": [9.9, 198.0],
"height_max_2": [9.5, 34.0],
"weight_min": [7.9, 7.5],
},
index=Index(["cat", "dog"], name="kind"),
columns=columns,
)
# check agg(key=(col, aggfunc)) case
result1 = df.groupby(by="kind").agg(
height_sqr_min=("height", lambda x: np.min(x ** 2)),
height_max=("height", "max"),
weight_max=("weight", "max"),
height_max_2=("height", lambda x: np.max(x)),
weight_min=("weight", lambda x: np.min(x)),
)
tm.assert_frame_equal(result1, expected)
# check pd.NamedAgg case
result2 = df.groupby(by="kind").agg(
height_sqr_min=pd.NamedAgg(
column="height", aggfunc=lambda x: np.min(x ** 2)
),
height_max=pd.NamedAgg(column="height", aggfunc="max"),
weight_max=pd.NamedAgg(column="weight", aggfunc="max"),
height_max_2=pd.NamedAgg(column="height", aggfunc=lambda x: np.max(x)),
weight_min=pd.NamedAgg(column="weight", aggfunc=lambda x: np.min(x)),
)
tm.assert_frame_equal(result2, expected)
def test_groupby_get_by_index():
# GH 33439
df = DataFrame({"A": ["S", "W", "W"], "B": [1.0, 1.0, 2.0]})
res = df.groupby("A").agg({"B": lambda x: x.get(x.index[-1])})
expected = DataFrame({"A": ["S", "W"], "B": [1.0, 2.0]}).set_index("A")
pd.testing.assert_frame_equal(res, expected)
@pytest.mark.parametrize(
"grp_col_dict, exp_data",
[
({"nr": "min", "cat_ord": "min"}, {"nr": [1, 5], "cat_ord": ["a", "c"]}),
({"cat_ord": "min"}, {"cat_ord": ["a", "c"]}),
({"nr": "min"}, {"nr": [1, 5]}),
],
)
def test_groupby_single_agg_cat_cols(grp_col_dict, exp_data):
# test single aggregations on ordered categorical cols GH 27800
# create the result dataframe
input_df = DataFrame(
{
"nr": [1, 2, 3, 4, 5, 6, 7, 8],
"cat_ord": list("aabbccdd"),
"cat": list("aaaabbbb"),
}
)
input_df = input_df.astype({"cat": "category", "cat_ord": "category"})
input_df["cat_ord"] = input_df["cat_ord"].cat.as_ordered()
result_df = input_df.groupby("cat").agg(grp_col_dict)
# create expected dataframe
cat_index = pd.CategoricalIndex(
["a", "b"], categories=["a", "b"], ordered=False, name="cat", dtype="category"
)
expected_df = DataFrame(data=exp_data, index=cat_index)
tm.assert_frame_equal(result_df, expected_df)
@pytest.mark.parametrize(
"grp_col_dict, exp_data",
[
({"nr": ["min", "max"], "cat_ord": "min"}, [(1, 4, "a"), (5, 8, "c")]),
({"nr": "min", "cat_ord": ["min", "max"]}, [(1, "a", "b"), (5, "c", "d")]),
({"cat_ord": ["min", "max"]}, [("a", "b"), ("c", "d")]),
],
)
def test_groupby_combined_aggs_cat_cols(grp_col_dict, exp_data):
# test combined aggregations on ordered categorical cols GH27800
# create the result dataframe
input_df = DataFrame(
{
"nr": [1, 2, 3, 4, 5, 6, 7, 8],
"cat_ord": list("aabbccdd"),
"cat": list("aaaabbbb"),
}
)
input_df = input_df.astype({"cat": "category", "cat_ord": "category"})
input_df["cat_ord"] = input_df["cat_ord"].cat.as_ordered()
result_df = input_df.groupby("cat").agg(grp_col_dict)
# create expected dataframe
cat_index = pd.CategoricalIndex(
["a", "b"], categories=["a", "b"], ordered=False, name="cat", dtype="category"
)
# unpack the grp_col_dict to create the multi-index tuple
# this tuple will be used to create the expected dataframe index
multi_index_list = []
for k, v in grp_col_dict.items():
if isinstance(v, list):
for value in v:
multi_index_list.append([k, value])
else:
multi_index_list.append([k, v])
multi_index = pd.MultiIndex.from_tuples(tuple(multi_index_list))
expected_df = DataFrame(data=exp_data, columns=multi_index, index=cat_index)
tm.assert_frame_equal(result_df, expected_df)
def test_nonagg_agg():
# GH 35490 - single and multiple agg of a non-agg function give the same results
# TODO: agg should raise for functions that don't aggregate
df = DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 2, 1]})
g = df.groupby("a")
result = g.agg(["cumsum"])
result.columns = result.columns.droplevel(-1)
expected = g.agg("cumsum")
tm.assert_frame_equal(result, expected)
def test_agg_no_suffix_index():
# GH36189
df = DataFrame([[4, 9]] * 3, columns=["A", "B"])
result = df.agg(["sum", lambda x: x.sum(), lambda x: x.sum()])
expected = DataFrame(
{"A": [12, 12, 12], "B": [27, 27, 27]}, index=["sum", "<lambda>", "<lambda>"]
)
tm.assert_frame_equal(result, expected)
# test Series case
result = df["A"].agg(["sum", lambda x: x.sum(), lambda x: x.sum()])
expected = Series([12, 12, 12], index=["sum", "<lambda>", "<lambda>"], name="A")
tm.assert_series_equal(result, expected)
def test_aggregate_datetime_objects():
# https://github.com/pandas-dev/pandas/issues/36003
# ensure we don't raise an error but keep object dtype for out-of-bounds
# datetimes
df = DataFrame(
{
"A": ["X", "Y"],
"B": [
datetime.datetime(2005, 1, 1, 10, 30, 23, 540000),
datetime.datetime(3005, 1, 1, 10, 30, 23, 540000),
],
}
)
result = df.groupby("A").B.max()
expected = df.set_index("A")["B"]
tm.assert_series_equal(result, expected)
| 33.043294
| 87
| 0.548208
|
1371358c00a6864748d537bc0a5bbc9d562a707f
| 1,127
|
py
|
Python
|
pdm/formats/__init__.py
|
frafra/pdm
|
12c5c4f91bbb7260be7d93f3e3914ba708309032
|
[
"MIT"
] | 1
|
2022-02-21T05:55:21.000Z
|
2022-02-21T05:55:21.000Z
|
pdm/formats/__init__.py
|
frafra/pdm
|
12c5c4f91bbb7260be7d93f3e3914ba708309032
|
[
"MIT"
] | null | null | null |
pdm/formats/__init__.py
|
frafra/pdm
|
12c5c4f91bbb7260be7d93f3e3914ba708309032
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from argparse import Namespace
from pathlib import Path
from typing import Iterable, Mapping, TypeVar, cast
from pdm._types import Protocol
from pdm.formats import flit, legacy, pipfile, poetry, requirements, setup_py
from pdm.models.candidates import Candidate
from pdm.models.requirements import Requirement
from pdm.project import Project
_T = TypeVar("_T", Candidate, Requirement)
class _Format(Protocol):
def check_fingerprint(self, project: Project | None, filename: str | Path) -> bool:
...
def convert(
self,
project: Project | None,
filename: str | Path,
options: Namespace | None,
) -> tuple[Mapping, Mapping]:
...
def export(
self, project: Project, candidates: Iterable[_T], options: Namespace | None
) -> str:
...
FORMATS: Mapping[str, _Format] = {
"pipfile": cast(_Format, pipfile),
"poetry": cast(_Format, poetry),
"flit": cast(_Format, flit),
"requirements": cast(_Format, requirements),
"legacy": cast(_Format, legacy),
"setuppy": cast(_Format, setup_py),
}
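# A minimal usage sketch (not part of the original module), assuming a Project
# instance `project` and a file path `path`: probe each registered format and
# convert with the first one whose fingerprint check matches.
def _detect_and_convert(project: Project, path: Path) -> tuple[Mapping, Mapping]:
    for fmt in FORMATS.values():
        if fmt.check_fingerprint(project, path):
            return fmt.convert(project, path, None)
    raise ValueError(f"no registered format recognizes {path}")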
| 26.833333
| 87
| 0.675244
|
d1fd00c0c8af6ac829ea112bbd092b3c563d5a07
| 3,369
|
py
|
Python
|
profiles_project/settings.py
|
Choisj91/profiles-rest-api
|
1f4d9002476b869610d79c17cf4f2f7f2290989d
|
[
"MIT"
] | null | null | null |
profiles_project/settings.py
|
Choisj91/profiles-rest-api
|
1f4d9002476b869610d79c17cf4f2f7f2290989d
|
[
"MIT"
] | null | null | null |
profiles_project/settings.py
|
Choisj91/profiles-rest-api
|
1f4d9002476b869610d79c17cf4f2f7f2290989d
|
[
"MIT"
] | null | null | null |
"""
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '4j3x_9a#zud-ja@2@(4$0v11c(#bro)xla@%n&lm-56cz#w%mb'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(int(os.environ.get('DEBUG',1)))
ALLOWED_HOSTS = [
'ec2-3-36-133-2.ap-northeast-2.compute.amazonaws.com',
'127.0.0.1'
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
STATIC_ROOT = 'static/'
| 25.717557
| 91
| 0.696646
|
d99fac043390f654cbcd108c466af414edfc7b1d
| 11,722
|
py
|
Python
|
dirhunt/processors.py
|
TheRipperJhon/Dirhunt
|
056336cac3a44297d3a4bb294faccb855b511e9d
|
[
"MIT"
] | null | null | null |
dirhunt/processors.py
|
TheRipperJhon/Dirhunt
|
056336cac3a44297d3a4bb294faccb855b511e9d
|
[
"MIT"
] | null | null | null |
dirhunt/processors.py
|
TheRipperJhon/Dirhunt
|
056336cac3a44297d3a4bb294faccb855b511e9d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import re
from bs4 import Comment
from colorama import Fore, Back
from dirhunt.colors import status_code_colors
from dirhunt.crawler_url import CrawlerUrl
from dirhunt.url import Url
from dirhunt.utils import colored
INDEX_FILES = ['index.php', 'index.html', 'index.htm']
ACCEPTED_PROTOCOLS = ['http', 'https']
def full_url_address(address, url):
"""
:type url: Url
:type address: str
:rtype :Url
"""
if address is None:
return
protocol_match = address.split(':', 1)[0] if ':' in address else ''
protocol_match = re.match('^([A-Za-z0-9\\-]+)$', protocol_match)
if protocol_match and protocol_match.group(1) not in ACCEPTED_PROTOCOLS:
return
# TODO: improve this. Accept other protocols to be rejected
if address.startswith('//'):
address = address.replace('//', '{}://'.format(url.protocol), 1)
if '://' not in address or address.startswith('/'):
url = url.copy()
url.path = address
return url
url = Url(address)
if url.is_valid():
return url
class ProcessBase(object):
name = ''
key_name = ''
index_file = None
status_code = 0
def __init__(self, response, crawler_url):
"""
:type crawler_url: CrawlerUrl
"""
# TODO: think about not passing response, text and soup through here to set them on self,
# so as not to fill up memory. They should be "volatile" things.
if response is not None:
self.status_code = response.status_code
# TODO: process other things (css, etc.)
self.crawler_url = crawler_url
def search_index_files(self):
if self.crawler_url.type not in ['directory', None]:
return
crawler = self.crawler_url.crawler
for index_file in INDEX_FILES:
url = self.crawler_url.url.copy()
url.set_children(index_file)
future = self.crawler_url.crawler.add_url(
CrawlerUrl(crawler, url, self.crawler_url.depth - 1, self, None, 'document',
timeout=self.crawler_url.timeout), True)
if self.crawler_url.crawler.closing:
return
result = future.result()
if result.exists:
self.index_file = url
break
@classmethod
def is_applicable(cls, request, text, crawler_url, soup):
raise NotImplementedError
def process(self, text, soup=None):
raise NotImplementedError
@property
def flags(self):
return {self.key_name}
def maybe_directory(self):
return self.crawler_url.maybe_directory()
def url_line(self):
body = colored('[{}]'.format(self.status_code), status_code_colors(self.status_code))
body += ' {} '.format(self.crawler_url.url.url)
body += colored(' ({})'.format(self.name or self.__class__.__name__), Fore.LIGHTYELLOW_EX)
return body
def __str__(self):
body = self.url_line()
if self.index_file:
body += colored('\n Index file found: ', Fore.BLUE)
body += '{}'.format(self.index_file.name)
return body
class Error(ProcessBase):
name = 'Error'
key_name = 'error'
def __init__(self, crawler_url, error):
super(Error, self).__init__(None, crawler_url)
self.error = error
def process(self, text, soup=None):
pass
def __str__(self):
body = colored('[ERROR]', Back.LIGHTRED_EX, Fore.LIGHTWHITE_EX)
body += ' {} '.format(self.crawler_url.url.url)
body += colored('({})'.format(self.error), Fore.LIGHTYELLOW_EX)
return body
@classmethod
def is_applicable(cls, request, text, crawler_url, soup):
pass
class GenericProcessor(ProcessBase):
name = 'Generic'
key_name = 'generic'
def process(self, text, soup=None):
self.search_index_files()
class ProcessRedirect(ProcessBase):
name = 'Redirect'
key_name = 'redirect'
redirector = None
def __init__(self, response, crawler_url):
super(ProcessRedirect, self).__init__(response, crawler_url)
self.redirector = full_url_address(response.headers.get('Location'), self.crawler_url.url)
def process(self, text, soup=None):
self.crawler_url.crawler.add_url(CrawlerUrl(self.crawler_url.crawler, self.redirector, 3, self.crawler_url,
timeout=self.crawler_url.timeout))
@classmethod
def is_applicable(cls, request, text, crawler_url, soup):
return 300 <= request.status_code < 400
def __str__(self):
body = super(ProcessRedirect, self).__str__()
body += colored('\n Redirect to: ', Fore.BLUE)
body += '{}'.format(self.redirector.address)
return body
class ProcessNotFound(ProcessBase):
name = 'Not Found'
key_name = 'not_found'
def process(self, text, soup=None):
self.search_index_files()
@classmethod
def is_applicable(cls, request, text, crawler_url, soup):
return request.status_code == 404
def __str__(self):
body = self.url_line()
if self.crawler_url.exists:
body += colored(' (FAKE 404)', Fore.YELLOW)
if self.index_file:
body += '\n Index file found: {}'.format(self.index_file.name)
return body
@property
def flags(self):
flags = super(ProcessNotFound, self).flags
if self.crawler_url.exists:
flags.update({'{}.fake'.format(self.key_name)})
return flags
class ProcessHtmlRequest(ProcessBase):
name = 'HTML document'
key_name = 'html'
def process(self, text, soup=None):
self.assets(soup)
self.links(soup)
self.search_index_files()
def links(self, soup):
links = [full_url_address(link.attrs.get('href'), self.crawler_url.url)
for link in soup.find_all('a')]
for link in filter(bool, links):
url = Url(link)
if not url.is_valid():
continue
depth = self.crawler_url.depth
if url.domain != self.crawler_url.url.domain or \
not url.path.startswith(self.crawler_url.url.directory_path):
depth -= 1
if depth <= 0:
continue
self.crawler_url.crawler.add_url(CrawlerUrl(self.crawler_url.crawler, link, depth, self.crawler_url,
timeout=self.crawler_url.timeout))
def assets(self, soup):
assets = [full_url_address(link.attrs.get('href'), self.crawler_url.url)
for link in soup.find_all('link')]
assets += [full_url_address(script.attrs.get('src'), self.crawler_url.url)
for script in soup.find_all('script')]
assets += [full_url_address(img.attrs.get('src'), self.crawler_url.url)
for img in soup.find_all('img')]
for asset in filter(bool, assets):
self.analyze_asset(asset)
self.crawler_url.crawler.add_url(CrawlerUrl(self.crawler_url.crawler, asset, 3, self.crawler_url,
type='asset', timeout=self.crawler_url.timeout))
def analyze_asset(self, asset):
"""
:type asset: Url
"""
if 'wordpress' not in self.crawler_url.flags and 'wp-content' in asset.path:
self.crawler_url.flags.update({'wordpress'})
# Override type always except for root path
self.crawler_url.type = 'rewrite' if self.crawler_url.type != 'directory' else 'directory'
self.crawler_url.depth -= 1
@classmethod
def is_applicable(cls, response, text, crawler_url, soup):
return response.headers.get('Content-Type', '').lower().startswith('text/html') and response.status_code < 300 \
and soup is not None
class ProcessIndexOfRequest(ProcessHtmlRequest):
name = 'Index Of'
key_name = 'index_of'
files = None
index_titles = ('index of', 'directory listing for')
def process(self, text, soup=None):
links = [full_url_address(link.attrs.get('href'), self.crawler_url.url)
for link in soup.find_all('a')]
for link in filter(lambda x: x.url.endswith('/'), links):
self.crawler_url.crawler.add_url(CrawlerUrl(self.crawler_url.crawler, link, 3, self.crawler_url,
type='directory', timeout=self.crawler_url.timeout))
self.files = [Url(link) for link in links]
def interesting_ext_files(self):
return filter(lambda x: x.name.split('.')[-1] in self.crawler_url.crawler.interesting_extensions, self.files)
def interesting_name_files(self):
return filter(lambda x: x.name in self.crawler_url.crawler.interesting_files, self.files)
def interesting_files(self):
for iterator in [self.interesting_ext_files(), self.interesting_name_files()]:
for file in iterator:
yield file
def __str__(self):
body = super(ProcessIndexOfRequest, self).__str__()
ext_files = list(self.interesting_ext_files())
name_files = list(self.interesting_name_files())
if ext_files:
body += colored('\n Interesting extension files:', Fore.BLUE)
body += ' {}'.format(', '.join(map(lambda x: x.name, ext_files)))
if name_files:
body += colored('\n Interesting file names:', Fore.MAGENTA)
body += ' {}'.format(', '.join(map(lambda x: x.name, name_files)))
if not ext_files and not name_files:
body += colored(' (Nothing interesting)', Fore.LIGHTYELLOW_EX)
return body
@classmethod
def is_applicable(cls, response, text, crawler_url, soup):
if not super(ProcessIndexOfRequest, cls).is_applicable(response, text, crawler_url, soup):
return False
title = soup.find('title')
if not title:
return False
title = title.text.lower()
for index_title in cls.index_titles:
if title.startswith(index_title):
return True
return False
@property
def flags(self):
flags = super(ProcessHtmlRequest, self).flags
try:
next(self.interesting_files())
except StopIteration:
flags.update({'{}.nothing'.format(self.key_name)})
return flags
class ProcessBlankPageRequest(ProcessHtmlRequest):
name = 'Blank page'
key_name = 'blank'
@classmethod
def is_applicable(cls, response, text, crawler_url, soup):
if not super(ProcessBlankPageRequest, cls).is_applicable(response, text, crawler_url, soup):
return False
def tag_visible(element):
if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']:
return False
if isinstance(element, Comment):
return False
return True
texts = soup.findAll(text=True)
visible_texts = filter(tag_visible, texts)
for text in visible_texts:
if text.strip():
return False
return True
def get_processor(response, text, crawler_url, soup):
for processor_class in PROCESSORS:
if processor_class.is_applicable(response, text, crawler_url, soup):
# TODO: pass None instead of the response
return processor_class(response, crawler_url)
PROCESSORS = [
ProcessRedirect,
ProcessNotFound,
ProcessIndexOfRequest,
ProcessBlankPageRequest,
ProcessHtmlRequest,
]
| 34.476471
| 120
| 0.612609
|
22e7de9bbd94a633a304bb75965763d4b9d91cca
| 341
|
py
|
Python
|
Crawler/eBookCrawl.py
|
zyphs21/myPythonPractise
|
7da984c98ee93e650ab2f9da4a2502340f1220b4
|
[
"MIT"
] | null | null | null |
Crawler/eBookCrawl.py
|
zyphs21/myPythonPractise
|
7da984c98ee93e650ab2f9da4a2502340f1220b4
|
[
"MIT"
] | null | null | null |
Crawler/eBookCrawl.py
|
zyphs21/myPythonPractise
|
7da984c98ee93e650ab2f9da4a2502340f1220b4
|
[
"MIT"
] | null | null | null |
# -*- coding:UTF-8 -*-
from bs4 import BeautifulSoup
import requests
if __name__ == "__main__":
target = 'http://www.biqukan.com/1_1094/5403177.html'
req = requests.get(url = target)
html = req.text
bf = BeautifulSoup(html, 'html.parser')  # explicit parser avoids the bs4 "no parser specified" warning
texts = bf.find_all('div', class_ = 'showtxt')
print(texts[0].text.replace('\xa0'*8,'\n\n'))
| 31
| 57
| 0.645161
|
c36aea22e9c375be0d8543ddc1eba0ff9f8acd15
| 258
|
py
|
Python
|
students/K33422/laboratory_works/Daria Plotskaya/lab_2/users/urls.py
|
olticher/ITMO_ICT_WebDevelopment_2021-2022
|
3de8728c29638d6733ad0664bf13e0d1eccae899
|
[
"MIT"
] | null | null | null |
students/K33422/laboratory_works/Daria Plotskaya/lab_2/users/urls.py
|
olticher/ITMO_ICT_WebDevelopment_2021-2022
|
3de8728c29638d6733ad0664bf13e0d1eccae899
|
[
"MIT"
] | null | null | null |
students/K33422/laboratory_works/Daria Plotskaya/lab_2/users/urls.py
|
olticher/ITMO_ICT_WebDevelopment_2021-2022
|
3de8728c29638d6733ad0664bf13e0d1eccae899
|
[
"MIT"
] | null | null | null |
from django.urls import path
from django.contrib.auth.views import LoginView
from .views import UserCreateFormView
urlpatterns = [
path("login/", LoginView.as_view(), name="login"),
path("register/", UserCreateFormView.as_view(), name="register")
]
| 28.666667
| 68
| 0.744186
|
6448e378d60663edc53e4b5c6cf1b596d24eb54c
| 3,586
|
py
|
Python
|
wagtail/contrib/postgres_search/models.py
|
Immensa/wagtail
|
cb33259c0f2aefd2d6e6b605d3e9a14387dbd01e
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail/contrib/postgres_search/models.py
|
Immensa/wagtail
|
cb33259c0f2aefd2d6e6b605d3e9a14387dbd01e
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail/contrib/postgres_search/models.py
|
Immensa/wagtail
|
cb33259c0f2aefd2d6e6b605d3e9a14387dbd01e
|
[
"BSD-3-Clause"
] | null | null | null |
from django.apps import apps
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.indexes import GinIndex
from django.contrib.postgres.search import SearchQuery, SearchVectorField
from django.db.models import CASCADE, ForeignKey, Model, TextField
from django.db.models.functions import Cast
from django.utils.translation import gettext_lazy as _
from wagtail.search.index import class_is_indexed
from .utils import get_descendants_content_types_pks
class RawSearchQuery(SearchQuery):
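# Builds a raw to_tsquery(...) SQL expression, interpolating the caller-supplied
# `format` template into the query instead of letting SearchQuery build it.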
def __init__(self, format, *args, **kwargs):
self.format = format
super().__init__(*args, **kwargs)
def as_sql(self, compiler, connection):
# escape apostrophe and backslash
params = [v.replace("'", "''").replace("\\", "\\\\") for v in self.value]
if self.config:
config_sql, config_params = compiler.compile(self.config)
template = "to_tsquery(%s::regconfig, '%s')" % (config_sql, self.format)
params = config_params + params
else:
template = "to_tsquery('%s')" % self.format
if self.invert:
template = '!!({})'.format(template)
return template, params
def __invert__(self):
extra = {
'invert': not self.invert,
'config': self.config,
}
return type(self)(self.format, self.value, **extra)
class TextIDGenericRelation(GenericRelation):
auto_created = True
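# The lookups below join IndexEntry rows to any indexed model: by the set of
# descendant content types, and by the object_id text cast against the model's pk.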
def get_content_type_lookup(self, alias, remote_alias):
field = self.remote_field.model._meta.get_field(
self.content_type_field_name)
return field.get_lookup('in')(
field.get_col(remote_alias),
get_descendants_content_types_pks(self.model))
def get_object_id_lookup(self, alias, remote_alias):
from_field = self.remote_field.model._meta.get_field(
self.object_id_field_name)
to_field = self.model._meta.pk
return from_field.get_lookup('exact')(
from_field.get_col(remote_alias),
Cast(to_field.get_col(alias), from_field))
def get_extra_restriction(self, where_class, alias, remote_alias):
cond = where_class()
cond.add(self.get_content_type_lookup(alias, remote_alias), 'AND')
cond.add(self.get_object_id_lookup(alias, remote_alias), 'AND')
return cond
def resolve_related_fields(self):
return []
class IndexEntry(Model):
content_type = ForeignKey(ContentType, on_delete=CASCADE)
# We do not use an IntegerField since primary keys are not always integers.
object_id = TextField()
content_object = GenericForeignKey()
# TODO: Add per-object boosting.
autocomplete = SearchVectorField()
body = SearchVectorField()
class Meta:
unique_together = ('content_type', 'object_id')
verbose_name = _('index entry')
verbose_name_plural = _('index entries')
indexes = [GinIndex(fields=['autocomplete']),
GinIndex(fields=['body'])]
def __str__(self):
return '%s: %s' % (self.content_type.name, self.content_object)
@property
def model(self):
return self.content_type.model
@classmethod
def add_generic_relations(cls):
for model in apps.get_models():
if class_is_indexed(model):
TextIDGenericRelation(cls).contribute_to_class(model,
'index_entries')
| 36.222222
| 84
| 0.660904
|
9183c21ef1799c5477bd6e8789d0e41442ef4233
| 9,552
|
py
|
Python
|
src/OCR/architecture/CharacterRecognizer.py
|
tsteffek/LicensePlateReconstructor
|
4930a080fbdf6e7d726e5282b2d75650566fd5d4
|
[
"MIT"
] | 2
|
2020-12-21T02:02:13.000Z
|
2021-11-09T06:25:36.000Z
|
src/OCR/architecture/CharacterRecognizer.py
|
tsteffek/LicensePlateReconstructor
|
4930a080fbdf6e7d726e5282b2d75650566fd5d4
|
[
"MIT"
] | 1
|
2021-11-09T06:25:36.000Z
|
2021-11-18T08:35:35.000Z
|
src/OCR/architecture/CharacterRecognizer.py
|
tsteffek/LicensePlateReconstructor
|
4930a080fbdf6e7d726e5282b2d75650566fd5d4
|
[
"MIT"
] | null | null | null |
import logging
from argparse import ArgumentParser
from typing import Tuple, List, Union
import pytorch_lightning as pl
import pytorch_warmup as warmup
import torch
from torch import nn, Tensor, optim
from src.base.model import Vocabulary
from .mobilenetv3 import mobilenetv3_small, mobilenetv3_large
from .util import Img2Seq, ConfusionMatrix
log = logging.getLogger('pytorch_lightning').getChild(__name__)
class CharacterRecognizer(pl.LightningModule):
def __init__(
self, vocab: Vocabulary, img_size: Tuple[int, int],
mobile_net_variant: str = 'small', width_mult: float = 1.,
first_filter_shape: Union[Tuple[int, int], int] = 3, first_filter_stride: Union[Tuple[int, int], int] = 2,
lstm_hidden: int = 48,
log_per_epoch: int = 3,
lr: float = 1e-4, lr_schedule: str = None, lr_warm_up: str = None,
ctc_reduction: str = 'mean', **kwargs
):
super().__init__()
self.save_hyperparameters()
self.vocab = vocab
self.input_size = img_size
mobile_net_kwargs = {
'width_mult': width_mult,
'first_filter_shape': first_filter_shape,
'first_filter_stride': first_filter_stride
}
if mobile_net_variant == 'small':
self.cnns = mobilenetv3_small(**mobile_net_kwargs)
elif mobile_net_variant == 'large':
self.cnns = mobilenetv3_large(**mobile_net_kwargs)
self.img2seq = Img2Seq()
w, _, lstm_input_dim = self.img2seq(self.cnns(self.example_input_array)).shape
self.lstms = nn.LSTM(input_size=lstm_input_dim, hidden_size=lstm_hidden,
bidirectional=True, num_layers=2, dropout=0.5)
self.fc = nn.Linear(lstm_hidden * 2, len(self.vocab))
self.conv_output_size = torch.tensor(w, dtype=torch.int64)
self.loss_func = nn.CTCLoss(reduction=ctc_reduction, zero_infinity=True)
self.softmax = nn.LogSoftmax(dim=-1)
self.lr = lr
self.lr_schedule = lr_schedule
self.lr_warm_up = lr_warm_up
self.log_per_epoch = log_per_epoch
self.accuracy_cha = pl.metrics.Accuracy(compute_on_step=False)
self.accuracy_len = pl.metrics.Accuracy(compute_on_step=False)
self.confusion_matrix = ConfusionMatrix(self.vocab.noisy_chars)
self.confusion_matrix_len = ConfusionMatrix(list(map(str, range(w))))
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument('--mobile_net_variant', type=str, default='small')
parser.add_argument('--width_mult', type=float, default=1.)
parser.add_argument('--first_filter_shape', type=int, nargs='+', default=3)
parser.add_argument('--first_filter_stride', type=int, nargs='+', default=2)
parser.add_argument('--lstm_hidden', type=int, default=48)
parser.add_argument('--log_per_epoch', type=int, default=3, help='How many predictions to log')
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--lr_schedule', type=str, default=None, choices=['cosine', None])
parser.add_argument('--lr_warm_up', type=str, default=None, choices=['linear', 'exponential', None])
parser.add_argument('--ctc_reduction', type=str, default='mean', choices=['mean', 'sum', None])
return parser
@property
def example_input_array(self, batch_size: int = 4) -> Tensor:
return torch.randn(batch_size, 3, *self.input_size, dtype=torch.float32, device=self.device)
def forward(self, x: Tensor) -> Tensor:
cnn_output = self.cnns(x)
formatted = self.img2seq(cnn_output)
lstm_output, _ = self.lstms(formatted)
return self.fc(lstm_output)
def loss(
self, output: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor
) -> Tuple[Tensor, Tensor]:
logits = self.softmax(output)
return self.loss_func(logits, targets, input_lengths, target_lengths), logits
def predict(self, x: Tensor) -> List[str]:
output = self.forward(x)
logits = self.softmax(output)
texts = self.decode_raw(logits)
return [self.vocab.decode_text(text) for text in texts]
def step(self, batch: Tuple[Tensor, Tuple[Tensor, Tensor]]) -> Tuple[Tensor, Tensor]:
x, (_, y, y_lengths) = batch
x_hat = self.forward(x)
batch_size = x_hat.shape[1]
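# Every sample shares the same CTC input length (the CNN output width),
# so conv_output_size is repeated once per element of the batch.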
return self.loss(x_hat, y, self.conv_output_size.repeat(batch_size), y_lengths)
def step_with_logging(
self, stage: str, batch: Tuple[Tensor, Tuple[Tensor, Tensor]], return_probe: bool
) -> Tuple[Tensor, Tensor, str]:
loss, logits = self.step(batch)
self.log(f'loss/{stage}', loss, on_epoch=True, sync_dist=True)
decoded = self.decode_raw(logits)
self.update_metrics(decoded, batch)
if return_probe:
return decoded[0], batch[0][0], batch[1][0][0]
def training_step(self, batch: Tuple[Tensor, Tuple[Tensor, Tensor]], batch_idx: int) -> Tensor:
loss = self.step(batch)[0]
self.log('loss/train', loss, on_step=True, on_epoch=True)
return loss
def validation_step(self, batch: Tuple[Tensor, Tuple[Tensor, Tensor]], batch_idx: int) -> Tuple[
Tensor, Tensor, str]:
return self.step_with_logging('val', batch, batch_idx < self.log_per_epoch)
def test_step(self, batch: Tuple[Tensor, Tuple[Tensor, Tensor]], batch_idx: int) -> Tuple[Tensor, Tensor, str]:
return self.step_with_logging('test', batch, batch_idx < self.log_per_epoch)
def update_metrics(self, decoded: List[Tensor], batch: Tuple[Tensor, Tuple[Tensor, Tensor]]):
predictions, pred_lengths = self.cat(decoded)
_, (_, y, y_lengths) = batch
matching_pred, matching_targets = self._get_matching_length_elements(predictions, pred_lengths, y, y_lengths)
self.accuracy_len.update(pred_lengths, y_lengths)
self.confusion_matrix_len.update(pred_lengths, y_lengths)
self.accuracy_cha.update(matching_pred, matching_targets)
self.confusion_matrix.update(matching_pred, matching_targets)
def decode_raw(self, logits: Tensor) -> List[Tensor]:
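# Greedy (best-path) CTC decoding: take the most likely class at each time step,
# collapse consecutive repeats, then drop the blank token.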
arg_max_batch = logits.transpose(0, 1).argmax(dim=-1)
uniques_batch = [torch.unique_consecutive(arg_max) for arg_max in arg_max_batch]
return [uniques[uniques != self.vocab.blank_idx] for uniques in uniques_batch]
def cat(self, arr: List[Tensor]) -> Tuple[Tensor, Tensor]:
pred_lengths = torch.tensor([len(pred) for pred in arr], dtype=torch.int64, device=self.device)
return torch.cat(arr), pred_lengths
@staticmethod
def _get_matching_length_elements(pred: Tensor, pred_lengths: Tensor, target: Tensor, target_lengths: Tensor):
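# Character-level accuracy is only well defined where prediction and target have
# the same length; keep just those batch elements before flattening.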
mask: Tensor = pred_lengths.__eq__(target_lengths)
return pred[mask.repeat_interleave(pred_lengths)], \
target[mask.repeat_interleave(target_lengths)],
def validation_epoch_end(self, outputs: List[Tuple[Tensor, Tensor, str]]) -> None:
self.log_epoch('val', outputs)
def test_epoch_end(self, outputs: List[Tuple[Tensor, Tensor, str]]) -> None:
self.log_epoch('test', outputs)
def log_epoch(self, stage: str, outputs: List[Tuple[Tensor, Tensor, str]]):
acc_len = self.accuracy_len.compute()
acc_cha = self.accuracy_cha.compute()
self.log(f'accuracy/{stage}/length', acc_len, sync_dist=True)
self.log(f'accuracy/{stage}/char', acc_cha, sync_dist=True)
self.log(f'accuracy/{stage}', acc_len * acc_cha, sync_dist=True)
log.info(self.confusion_matrix_len.compute())
log.info(self.confusion_matrix.compute())
predictions = [f'"{self.vocab.decode_text(output[0])}" is actually "{output[2]}"' for output in outputs]
original_images = torch.stack([output[1] for output in outputs])
self.logger.experiment.add_images(f'{stage}/orig', original_images, self.global_step)
self.logger.experiment.add_text(f'{stage}/pred', '<br/>'.join(predictions), self.global_step)
def configure_optimizers(self):
if self.lr_schedule is None:
return optim.Adam(self.parameters(), lr=self.lr, weight_decay=1e-5)
elif self.lr_schedule == 'cosine':
optimizer = optim.Adam(self.parameters(), lr=self.lr, weight_decay=1e-5)
max_steps = len(self.train_dataloader()) // self.trainer.accumulate_grad_batches * self.trainer.max_epochs
schedules = [{
'scheduler': optim.lr_scheduler.CosineAnnealingLR(optimizer, max_steps),
'interval': 'step',
}]
if self.lr_warm_up:
if self.lr_warm_up == 'linear':
warmup_scheduler = warmup.UntunedLinearWarmup(optimizer)
elif self.lr_warm_up == 'exponential':
warmup_scheduler = warmup.UntunedExponentialWarmup(optimizer)
else:
raise ValueError('lr_warm_up can only be "linear" or "exponential", but was ' + self.lr_warm_up)
warmup_scheduler.step = warmup_scheduler.dampen
schedules.append({
'scheduler': warmup_scheduler,
'interval': 'step'
})
return [optimizer], schedules
def on_epoch_end(self):
log.info('\n')
| 45.703349
| 118
| 0.660385
|
9701987aecb544f9f2700dc8a2bfaf312a7c858d
| 11,719
|
py
|
Python
|
tests/integration/use_cases/test_clustering.py
|
dimensigon/dimensigon
|
079d7c91a66e10f13510d89844fbadb27e005b40
|
[
"Apache-2.0"
] | 2
|
2020-11-20T10:27:14.000Z
|
2021-02-21T13:57:56.000Z
|
tests/integration/use_cases/test_clustering.py
|
dimensigon/dimensigon
|
079d7c91a66e10f13510d89844fbadb27e005b40
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/use_cases/test_clustering.py
|
dimensigon/dimensigon
|
079d7c91a66e10f13510d89844fbadb27e005b40
|
[
"Apache-2.0"
] | null | null | null |
import datetime as dt
import threading
from unittest import TestCase, mock
from dimensigon.domain.entities import Server
try:
from unittest.mock import AsyncMock
except ImportError:
from tests.base import AsyncMock
import dimensigon.web.network as ntwrk
from dimensigon import defaults
from dimensigon.use_cases.cluster import ClusterManager, NewEvent, DeathEvent, ZombieEvent, _Entry, AliveEvent
from dimensigon.web import db
from tests.base import OneNodeMixin
now = dt.datetime(2000, 1, 1, 0, 10, 0, tzinfo=dt.timezone.utc)
def handler(event, l, e):
l.append(event)
e.set()
class TestClusterManager(OneNodeMixin, TestCase):
def setUp(self) -> None:
super().setUp()
self.new_event = threading.Event()
self.mock_queue = mock.Mock()
self.mock_dm = mock.Mock()
self.mock_dm.flask_app = self.app
self.mock_dm.engine = db.engine
self.mock_dm.manager.dict.return_value = dict()
self.mock_dm.server_id = self.s1.id
self.cm = ClusterManager("Cluster", startup_event=threading.Event(), shutdown_event=threading.Event(),
publish_q=self.mock_queue, event_q=None, dimensigon=self.mock_dm)
def tearDown(self) -> None:
self.cm._notify_cluster_out = mock.Mock()
self.cm.shutdown()
super().tearDown()
def test_main_func(self):
self.cm._send_data = mock.Mock(spec=self.cm._send_data)
# New Event
self.cm.put(1, keepalive=now)
self.cm.main_func()
self.cm.publish_q.safe_put.assert_called_once()
self.assertIsInstance(self.mock_queue.safe_put.call_args[0][0], NewEvent)
self.assertEqual(1, self.mock_queue.safe_put.call_args[0][0].args[0])
self.assertEqual(_Entry(1, now, False, False), self.cm._registry.get(1))
self.cm.zombie_threshold = dt.timedelta(seconds=0.05)
now2 = now + dt.timedelta(minutes=1)
self.mock_queue.reset_mock()
# KeepAlive Event
self.cm.put(1, now2)
self.cm.main_func()
self.cm.publish_q.safe_put.assert_not_called()
self.assertEqual(_Entry(1, now2, False, False), self.cm._registry.get(1))
self.mock_queue.reset_mock()
# Zombie Event
item = self.cm.queue.get(block=True)
self.cm.queue.put(item)
self.cm.main_func()
self.cm.publish_q.safe_put.assert_called_once()
self.assertIsInstance(self.mock_queue.safe_put.call_args[0][0], ZombieEvent)
self.assertEqual(1, self.mock_queue.safe_put.call_args[0][0].args[0])
self.assertEqual(_Entry(1, now2, False, True), self.cm._registry.get(1))
self.cm.zombie_threshold = dt.timedelta(seconds=180)
now3 = now2 + dt.timedelta(minutes=1)
self.mock_queue.reset_mock()
# Alive Event
self.cm.put(1, keepalive=now3)
self.cm.main_func()
self.cm.publish_q.safe_put.assert_called_once()
self.assertIsInstance(self.mock_queue.safe_put.call_args[0][0], AliveEvent)
self.assertEqual(1, self.mock_queue.safe_put.call_args[0][0].args[0])
self.assertEqual(_Entry(1, now3, False, False), self.cm._registry.get(1))
now4 = now3 + dt.timedelta(minutes=2)
self.mock_queue.reset_mock()
# Death Event
self.cm.put(1, now4, True)
self.cm.main_func()
self.cm.publish_q.safe_put.assert_called_once()
self.assertIsInstance(self.mock_queue.safe_put.call_args[0][0], DeathEvent)
self.assertEqual(1, self.mock_queue.safe_put.call_args[0][0].args[0])
self.assertEqual(_Entry(1, now4, True, False), self.cm._registry.get(1))
def test_get_alive(self):
self.cm.put(1, now)
self.cm.put(2, now)
self.cm.put(3, now)
for _ in range(3):
self.cm.main_func()
self.assertListEqual([1, 2, 3, self.s1.id], self.cm.get_alive())
self.cm._registry[2].death = True
self.cm._registry[3].zombie = True
self.assertListEqual([1, self.s1.id], self.cm.get_alive())
def test_get_zombies(self):
self.cm.put(1, now)
self.cm.main_func()
self.assertListEqual([], self.cm.get_zombies())
self.cm._registry[1].zombie = True
self.assertListEqual([1], self.cm.get_zombies())
@mock.patch('dimensigon.use_cases.cluster.get_now')
def test_get_cluster(self, mock_get_now):
mock_get_now.return_value = now
self.cm.put(1, now)
self.cm.put(2, now)
self.cm.put(3, now)
for _ in range(3):
self.cm.main_func()
self.assertListEqual([(1, now, False), (2, now, False), (3, now, False), (self.s1.id, now, False)],
self.cm.get_cluster())
self.cm._registry[2].death = True
self.cm._registry[3].zombie = True
self.assertListEqual([(1, now, False), (2, now, True), (3, now, False), (self.s1.id, now, False)],
self.cm.get_cluster())
self.assertListEqual([(1, now.strftime(defaults.DATETIME_FORMAT), False),
(2, now.strftime(defaults.DATETIME_FORMAT), True),
(3, now.strftime(defaults.DATETIME_FORMAT), False),
(self.s1.id, now.strftime(defaults.DATETIME_FORMAT), False)],
self.cm.get_cluster(str_format=defaults.DATETIME_FORMAT))
def test___contains__(self):
self.cm.put(1, now)
self.cm.put(2, now)
self.cm.put(3, now)
for _ in range(3):
self.cm.main_func()
self.assertTrue(1 in self.cm)
self.assertTrue(2 in self.cm)
self.assertTrue(3 in self.cm)
self.assertTrue(self.s1.id in self.cm)
self.cm._registry[2].death = True
self.cm._registry[3].zombie = True
self.assertTrue(1 in self.cm)
self.assertFalse(2 in self.cm)
self.assertFalse(3 in self.cm)
self.assertTrue(self.s1.id in self.cm)
@mock.patch('dimensigon.use_cases.cluster.threading', )
@mock.patch('dimensigon.use_cases.cluster.ntwrk.parallel_requests', spec=AsyncMock)
@mock.patch('dimensigon.use_cases.cluster.Server.get_neighbours')
@mock.patch('dimensigon.use_cases.cluster.get_root_auth')
def test__send_data(self, mock_get_root_auth, mock_get_neighbours, mock_parallel_requests, mock_threading):
async def parallel_responses(responses):
return responses
mock_get_neighbours.return_value = [self.s1]
mock_get_root_auth.return_value = 'auth'
mock_parallel_requests.return_value = parallel_responses([ntwrk.Response(code=200, server=self.s1)])
self.cm.put(1, now)
self.cm.put(2, now, death=True)
for _ in range(2):
self.cm.main_func()
self.cm._send_data()
self.assertEqual(0, len(self.cm._buffer))
mock_parallel_requests.assert_called_once_with(mock_get_neighbours.return_value, 'POST',
view_or_url='api_1_0.cluster',
json=[dict(id=1, death=False,
keepalive=now.strftime(defaults.DATEMARK_FORMAT)),
dict(id=2, death=True,
keepalive=now.strftime(defaults.DATEMARK_FORMAT)),
], auth=mock_get_root_auth.return_value, securizer=False
)
with self.subTest("Error sending data"):
async def parallel_responses(responses):
raise Exception()
self.assertEqual(0, len(self.cm._buffer))
mock_parallel_requests.return_value = parallel_responses(None)
self.cm.put(1, now + dt.timedelta(minutes=1))
self.cm.put(2, now + dt.timedelta(minutes=1), death=True)
for _ in range(2):
self.cm.main_func()
self.assertIsNotNone(self.cm._buffer)
self.cm._send_data()
self.assertIsNotNone(self.cm._buffer)
@mock.patch('dimensigon.use_cases.cluster.get_now')
@mock.patch('dimensigon.use_cases.cluster.ntwrk.post')
@mock.patch('dimensigon.use_cases.cluster.get_root_auth')
def test__notify_cluster_in(self, mock_get_root_auth, mock_post, mock_get_now):
mock_get_root_auth.return_value = 'auth'
mock_post.side_effect = [ntwrk.Response(code=200, msg={
'cluster': [(1, now.strftime(defaults.DATEMARK_FORMAT), False),
(2, now.strftime(defaults.DATEMARK_FORMAT), False)],
'neighbours': [1, 2]})]
mock_get_now.return_value = now
s2 = Server('node2', port=5000)
s2.set_route(None, gate=s2.gates[0], cost=0)
db.session.add(s2)
self.cm._route_initiated = mock.Mock()
self.cm._notify_cluster_in()
self.cm.main_func()
self.assertDictEqual({1: _Entry(id=1, keepalive=now, death=False),
2: _Entry(id=2, keepalive=now, death=False)},
self.cm._registry)
routes = [s2.route.to_json()]
mock_post.assert_called_once_with(s2, 'api_1_0.cluster_in', view_data=dict(server_id=str(self.s1.id)),
json=dict(keepalive=now.strftime(defaults.DATEMARK_FORMAT), routes=routes)
, timeout=10, auth='auth')
@mock.patch('dimensigon.use_cases.cluster.get_now')
@mock.patch('dimensigon.use_cases.cluster.ntwrk.parallel_requests', spec=AsyncMock)
@mock.patch('dimensigon.use_cases.cluster.Server.get_neighbours')
@mock.patch('dimensigon.use_cases.cluster.get_root_auth')
def test__notify_cluster_out(self, mock_get_root_auth, mock_get_neighbours, mock_parallel_requests, mock_get_now):
async def parallel_responses(responses):
return responses
mock_get_root_auth.return_value = 'auth'
mock_get_neighbours.return_value = [self.s1]
with self.subTest("Successful cluster out message"):
mock_parallel_requests.return_value = parallel_responses([ntwrk.Response(code=200)])
mock_get_now.return_value = now
self.cm._notify_cluster_out()
mock_parallel_requests.assert_called_once_with([self.s1], 'post',
view_or_url='api_1_0.cluster_out',
view_data=dict(server_id=str(self.s1.id)),
json={'death': now.strftime(defaults.DATEMARK_FORMAT)},
timeout=2, auth='auth')
with self.subTest("Error sending cluster out message"):
mock_get_neighbours.return_value = [self.s1]
mock_parallel_requests.reset_mock()
mock_parallel_requests.return_value = parallel_responses([ntwrk.Response(code=500, server=self.s1)])
self.cm.logger.warning = mock.Mock()
self.cm._notify_cluster_out()
self.cm.logger.warning.assert_called_once_with(
f"Unable to send data to {self.s1}: {ntwrk.Response(code=500, server=self.s1)}")
with self.subTest("No servers to send message"):
mock_get_neighbours.return_value = []
mock_parallel_requests.reset_mock()
self.cm._notify_cluster_out()
mock_parallel_requests.assert_not_called()
| 43.403704
| 118
| 0.610718
|
d9c2a1abb00ef7976e540b4b5feff407ac5bd648
| 365
|
py
|
Python
|
profile_project/profile_app/urls.py
|
nitesh-01/django-backend-rest-api
|
06140200ab3a5ef5ab035f8f49c575e448df609b
|
[
"MIT"
] | null | null | null |
profile_project/profile_app/urls.py
|
nitesh-01/django-backend-rest-api
|
06140200ab3a5ef5ab035f8f49c575e448df609b
|
[
"MIT"
] | null | null | null |
profile_project/profile_app/urls.py
|
nitesh-01/django-backend-rest-api
|
06140200ab3a5ef5ab035f8f49c575e448df609b
|
[
"MIT"
] | null | null | null |
from django.urls import path,include
from rest_framework.routers import DefaultRouter
from profile_app import views
router = DefaultRouter()
router.register('view_set', views.Helloviewset, basename='view_set')
router.register('profile', views.UserProfileViewset)
urlpatterns = [
path('hello/',views.helloview.as_view()),
path('',include(router.urls)),
]
| 26.071429
| 68
| 0.769863
|
a053059619a6f407e7ce74f0abc14d7fd7593164
| 1,324
|
py
|
Python
|
docs/extensions/natbib/directives.py
|
mwibrow/vlnm
|
f3fc6ba1a207431bc12a457aab4ab847a584a66f
|
[
"AFL-3.0"
] | null | null | null |
docs/extensions/natbib/directives.py
|
mwibrow/vlnm
|
f3fc6ba1a207431bc12a457aab4ab847a584a66f
|
[
"AFL-3.0"
] | 4
|
2018-09-25T06:57:29.000Z
|
2019-09-09T13:22:00.000Z
|
docs/extensions/natbib/directives.py
|
mwibrow/vlnm
|
f3fc6ba1a207431bc12a457aab4ab847a584a66f
|
[
"AFL-3.0"
] | 1
|
2020-10-20T05:42:45.000Z
|
2020-10-20T05:42:45.000Z
|
"""
New Directives
~~~~~~~~~~~~~~
"""
import os
from pybtex.database.input import bibtex
from docutils.parsers.rst import directives, Directive
from .nodes import BibliographyNode
class BibliographyDirective(Directive):
"""Class for processing the :rst:dir:`bibliography` directive.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
has_content = False
option_spec = {
'all': directives.flag,
'style': directives.unchanged
}
def run(self):
"""Process .bib files, set file dependencies, and create a
node that is to be transformed to the entries of the
bibliography.
"""
env = self.state.document.settings.env
id_ = 'bibtex-bibliography-{}-{}'.format(
env.docname, env.new_serialno('bibtex'))
for bibfile in self.arguments[0].split():
bibfile = os.path.normpath(env.relfn2path(bibfile.strip())[1])
parser = bibtex.Parser()
cache = parser.parse_file(bibfile)
env.bibcache.add_entries(cache.entries, env.docname)
data = dict(
docname=env.docname,
style=self.options.get('style'),
all='all' in self.options)
return [BibliographyNode('', ids=[id_], data=data)]
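# A hypothetical reStructuredText usage of the directive defined above, assuming
# a bib file named refs.bib next to the document (names chosen for illustration):
#
#   .. bibliography:: refs.bib
#      :style: plain
#      :all: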
| 29.422222
| 74
| 0.620091
|
3a9e284ff7d956a7c8c6c8b828054ca1bc763b19
| 12,108
|
py
|
Python
|
txtrader/tcpserver.py
|
rstms/txTrader
|
3120eb47f10979e90c48cb66543378084bae624a
|
[
"MIT"
] | 43
|
2015-03-30T15:20:00.000Z
|
2022-02-15T18:25:54.000Z
|
txtrader/tcpserver.py
|
rstms/txTrader
|
3120eb47f10979e90c48cb66543378084bae624a
|
[
"MIT"
] | 12
|
2015-08-05T17:36:28.000Z
|
2020-05-03T23:23:42.000Z
|
txtrader/tcpserver.py
|
rstms/txTrader
|
3120eb47f10979e90c48cb66543378084bae624a
|
[
"MIT"
] | 25
|
2015-11-04T03:08:57.000Z
|
2021-08-07T09:47:37.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
tcpserver.py
------------
TxTrader TCP server module - Implement ASCII line oriented event interface.
Copyright (c) 2015 Reliance Systems Inc. <mkrueger@rstms.net>
Licensed under the MIT license. See LICENSE for details.
"""
from txtrader import VERSION, DATE, LABEL
import sys
import os
from twisted.internet.protocol import Factory
from twisted.internet import reactor, defer
from twisted.protocols import basic
from socket import gethostname
import ujson as json
import traceback
# set 512MB line buffer
LINE_BUFFER_LENGTH = 0x20000000
class tcpserver(basic.NetstringReceiver):
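# Netstring-framed command protocol: each received string is decoded, logged,
# split on whitespace, and dispatched to a handler looked up in self.commands.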
MAX_LENGTH = LINE_BUFFER_LENGTH
def __init__(self):
self.commands = {
'auth': self.cmd_auth,
'help': self.cmd_help,
'quit': self.cmd_disconnect,
'exit': self.cmd_disconnect,
'bye': self.cmd_disconnect,
'status': self.cmd_status,
'getbars': self.cmd_getbars,
'marketorder': self.cmd_market_order,
'stagemarketorder': self.cmd_stage_market_order,
'stoporder': self.cmd_stop_order,
'limitorder': self.cmd_limit_order,
'stoplimitorder': self.cmd_stoplimit_order,
'add': self.cmd_add,
'del': self.cmd_del,
'query': self.cmd_query,
'querydata': self.cmd_query_data,
'symbols': self.cmd_symbols,
'positions': self.cmd_positions,
'orders': self.cmd_orders,
'tickets': self.cmd_tickets,
'executions': self.cmd_executions,
'globalcancel': self.cmd_global_cancel,
'cancel': self.cmd_cancel,
'setaccount': self.cmd_setaccount,
'accounts': self.cmd_accounts,
'shutdown': self.cmd_shutdown,
}
self.authmap = set([])
self.options = {}
def stringReceived(self, line):
line = line.decode().strip()
self.factory.output(
'user command: %s' % ('%s xxxxxxxxxxx' % ' '.join(line.split()[:2]) if line.startswith('auth') else line)
)
if line:
cmd = line.split()[0]
if cmd in self.commands.keys():
try:
response = self.commands[cmd](line)
except Exception as exc:
self.factory.api.error_handler(self, repr(exc))
traceback.print_exc()
response = f'.error: {repr(exc)}'
self.send(response)
self.factory.api.check_exception_halt(exc, self)
else:
if response:
self.send(response)
else:
self.send('.what?')
def send(self, line):
if len(line) > self.MAX_LENGTH:
self.factory.api.force_disconnect(
f"NetstringReceiver: cannot send message of length {len(line)} {repr(line[:64])}..."
)
else:
return self.sendString(line.encode())
def cmd_auth(self, line):
auth, username, password = (line).split()[:3]
options_field = (line[len(auth) + len(username) + len(password) + 3:]).strip()
if not options_field.startswith('{'):
# legacy options are in string format: i.e. 'noquotes notrades'; convert to dict
if options_field == 'noquotes notrades':
# legacy clients sending "noquotes notrades" expect a default of {'order-notification': True}
self.options = {'order-notification': True}
self.factory.api.output(f"Setting legacy client options to: {repr(self.options)}")
else:
self.options = {o: True for o in options_field.strip().split()}
else:
self.options = json.loads(options_field) if options_field else {}
if self.factory.validate(username, password):
self.authmap.add(self.transport.getPeer())
self.factory.api.open_client(self)
return '.Authorized %s' % self.factory.api.channel
else:
self.check_authorized()
def check_authorized(self):
authorized = self.transport.getPeer() in self.authmap
if not authorized:
self.send('.Authorization required!')
self.factory.api.close_client(self)
self.transport.loseConnection()
return authorized
def check_initialized(self):
initialized = self.factory.api.initialized
if not initialized:
self.send('.Initialization not complete!')
self.factory.api.close_client(self)
self.transport.loseConnection()
return initialized
def cmd_shutdown(self, line):
if self.check_authorized():
self.factory.output('client at %s requested shutdown: %s' % (self.transport.getPeer(), line))
self.factory.api.close_client(self)
reactor.callLater(0, reactor.stop)
def cmd_help(self, line):
self.send('.commands: %s' % repr(self.commands.keys()))
def cmd_disconnect(self, line):
self.authmap.discard(self.transport.getPeer())
self.transport.loseConnection()
def cmd_status(self, line):
self.send('.status: %s' % self.factory.api.query_connection_status())
def cmd_setaccount(self, line):
if self.check_authorized() and self.check_initialized():
setaccount, account = line.split()[:2]
self.factory.api.set_account(account, self.send)
def cmd_accounts(self, line):
if self.check_authorized() and self.check_initialized():
self.send('.accounts: %s' % self.factory.api.accounts)
self.factory.api.request_accounts(self.defer_response(self.send_response, 'accounts'))
def cmd_getbars(self, line):
if self.check_authorized() and self.check_initialized():
_, symbol, period, start_date, start_time, end_date, end_time = line.split()[:7]
self.factory.api.query_bars(
symbol, period, ' '.join((start_date, start_time)), ' '.join((end_date, end_time)), self.send
)
def cmd_add(self, line):
if self.check_authorized() and self.check_initialized():
_, symbol = line.split()[:2]
symbol = symbol.upper()
self.factory.api.symbol_enable(symbol, self, self.defer_response(self.send_response, 'symbol'))
#self.send(f".symbol: {symbol} added")
def cmd_del(self, line):
if self.check_authorized() and self.check_initialized():
_, symbol = line.split()[:2]
symbol = symbol.upper()
self.factory.api.symbol_disable(symbol, self, self.defer_response(self.send_response, 'symbol'))
#self.send(f".symbol: {symbol} deleted")
def cmd_query(self, line):
if self.check_authorized() and self.check_initialized():
_, symbol = line.split()[:2]
symbol = symbol.upper()
self.send_response(json.dumps(self._symbol_fields(symbol)), 'symbol')
def cmd_query_data(self, line):
if self.check_authorized() and self.check_initialized():
_, symbol = line.split()[:2]
symbol = symbol.upper()
self.send_response(json.dumps(self._symbol_fields(symbol, raw=True)), 'symbol-data')
def _symbol_fields(self, symbol, raw=False):
if raw:
symbol_fields = self.factory.api.symbols[symbol].rawdata
else:
symbol_fields = self.factory.api.symbols[symbol].export(self.options.get('SYMBOL_FIELDS', None))
return symbol_fields
def cmd_market_order(self, line):
if self.check_authorized() and self.check_initialized():
_, account, route, symbol, qstr = line.split()[:5]
self.factory.api.market_order(account, route, symbol, int(qstr), self.send)
def cmd_stage_market_order(self, line):
if self.check_authorized() and self.check_initialized():
_, tag, account, route, symbol, qstr = line.split()[:6]
self.factory.api.stage_market_order(tag, account, route, symbol, int(qstr), self.send)
def cmd_stop_order(self, line):
if self.check_authorized() and self.check_initialized():
_order, account, route, symbol, price, qstr = line.split()[:6]
self.factory.api.stop_order(account, route, symbol, float(price), int(qstr), self.send)
def cmd_limit_order(self, line):
if self.check_authorized() and self.check_initialized():
_, account, route, symbol, price, qstr = line.split()[:6]
self.factory.api.limit_order(account, route, symbol, float(price), int(qstr), self.send)
def cmd_stoplimit_order(self, line):
if self.check_authorized() and self.check_initialized():
_, account, route, symbol, stop_price, limit_price, qstr = line.split()[:7]
self.factory.api.stoplimit_order(
account, route, symbol, float(stop_price), float(limit_price), int(qstr), self.send
)
def cmd_cancel(self, line):
if self.check_authorized() and self.check_initialized():
_, _id = line.split()[:2]
self.factory.api.cancel_order(_id, self.send)
def cmd_symbols(self, line):
if self.check_authorized() and self.check_initialized():
symbols = {s: self._symbol_fields(s) for s in self.factory.api.symbols}
self.send_response(json.dumps(symbols), 'symbols')
def cmd_positions(self, line):
if self.check_authorized() and self.check_initialized():
self.factory.api.request_positions(self.defer_response(self.send_response, 'positions'))
def cmd_orders(self, line):
if self.check_authorized() and self.check_initialized():
self.factory.api.request_orders(self.defer_response(self.send_response, 'orders'))
def cmd_tickets(self, line):
if self.check_authorized() and self.check_initialized():
self.factory.api.request_tickets(self.defer_response(self.send_response, 'tickets'))
def cmd_executions(self, line):
if self.check_authorized() and self.check_initialized():
self.factory.api.request_executions(self.defer_response(self.send_response, 'executions'))
def cmd_global_cancel(self, line):
if self.check_authorized() and self.check_initialized():
self.factory.api.request_global_cancel()
self.send('.global order cancel requested')
def connectionMade(self):
self.factory.output('client connection from %s' % self.transport.getPeer())
self.authmap.discard(self.transport.getPeer())
self.send(
'.connected: %s %s %s %s on %s' % (self.factory.api.label, str(VERSION), str(DATE), str(LABEL), str(gethostname()))
)
def connectionLost(self, reason):
self.factory.output('client connection from %s lost: %s' % (self.transport.getPeer(), repr(reason)))
self.authmap.discard(self.transport.getPeer())
self.factory.api.close_client(self)
def send_response(self, data, label):
self.send(f'{self.factory.api.channel}.{label}: {data}')
def defer_response(self, sender, command):
d = defer.Deferred()
d.addCallback(sender, command)
d.addErrback(self.api_error)
d.addErrback(self.api_timeout)
return d
def api_timeout(self, failure):
self.send(f'alert: API timeout errback: {repr(failure)}')
return failure
def api_error(self, failure):
self.send(f'alert: API errback: {repr(failure)}')
return failure
class serverFactory(Factory):
protocol = tcpserver
def __init__(self, api):
self.api = api
self.output = api.output
def validate(self, username, password):
return username == self.api.username and password == self.api.password
def buildProtocol(self, addr):
self.output(f'buildProtocol: addr={addr}')
return super().buildProtocol(addr)
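# Usage sketch (an assumption, not part of the original module): clients frame each
# command as a netstring ("<length>:<payload>,"). Host, port, and credentials below
# are illustrative placeholders only.
def example_netstring_client(host='localhost', port=50090, username='user', password='secret'):
    import socket as client_socket
    payload = ('auth %s %s {}' % (username, password)).encode()
    with client_socket.create_connection((host, port)) as conn:
        conn.sendall(b'%d:%s,' % (len(payload), payload))
        # the reply is also netstring-framed; return it raw for inspection
        return conn.recv(4096)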
| 40.36
| 127
| 0.62149
|
ab8a8d93085679e127bdb0cf2a89ceedbd9a3445
| 2,465
|
py
|
Python
|
tests/test_get_session.py
|
py-steph/aiohttp-session
|
5c900766e5d4f63e9776ceac31537c6160b10513
|
[
"Apache-2.0"
] | 169
|
2016-11-14T10:17:42.000Z
|
2022-03-08T16:19:00.000Z
|
tests/test_get_session.py
|
py-steph/aiohttp-session
|
5c900766e5d4f63e9776ceac31537c6160b10513
|
[
"Apache-2.0"
] | 587
|
2016-10-24T11:43:37.000Z
|
2022-03-28T00:13:39.000Z
|
tests/test_get_session.py
|
py-steph/aiohttp-session
|
5c900766e5d4f63e9776ceac31537c6160b10513
|
[
"Apache-2.0"
] | 82
|
2016-11-16T03:57:33.000Z
|
2022-01-21T07:12:41.000Z
|
import pytest
from aiohttp import web
from aiohttp.test_utils import make_mocked_request
from aiohttp_session import (AbstractStorage, SESSION_KEY, STORAGE_KEY, Session,
get_session, new_session)
async def test_get_stored_session() -> None:
req = make_mocked_request('GET', '/')
session = Session('identity', data=None, new=False)
req[SESSION_KEY] = session
ret = await get_session(req)
assert session is ret
async def test_session_is_not_stored() -> None:
req = make_mocked_request('GET', '/')
with pytest.raises(RuntimeError):
await get_session(req)
async def test_storage_returns_not_session_on_load_session() -> None:
req = make_mocked_request('GET', '/')
class Storage():
async def load_session(self, request: web.Request) -> None:
return None
req[STORAGE_KEY] = Storage()
with pytest.raises(RuntimeError):
await get_session(req)
async def test_get_new_session() -> None:
req = make_mocked_request('GET', '/')
session = Session('identity', data=None, new=False)
class Storage(AbstractStorage):
async def load_session(self, request: web.Request): # type: ignore[no-untyped-def]
pass
async def save_session(
self,
request: web.Request,
response: web.StreamResponse,
session: Session
) -> None:
pass
req[SESSION_KEY] = session
req[STORAGE_KEY] = Storage()
ret = await new_session(req)
assert ret is not session
async def test_get_new_session_no_storage() -> None:
req = make_mocked_request('GET', '/')
session = Session('identity', data=None, new=False)
req[SESSION_KEY] = session
with pytest.raises(RuntimeError):
await new_session(req)
async def test_get_new_session_bad_return() -> None:
req = make_mocked_request('GET', '/')
class Storage(AbstractStorage):
async def new_session(self): # type: ignore[no-untyped-def]
return ''
async def load_session(self, request: web.Request) -> Session:
return Session(None, data=None, new=True)
async def save_session(
self,
request: web.Request,
response: web.StreamResponse,
session: Session
) -> None:
pass
req[STORAGE_KEY] = Storage()
with pytest.raises(RuntimeError):
await new_session(req)
| 27.087912
| 91
| 0.639351
|
e9df6eeb91c6a4a02a65019c6726478498fad476
| 691
|
py
|
Python
|
gwlfe/Input/Animals/TotAEU.py
|
mudkipmaster/gwlf-e
|
9e058445537dd32d1916f76c4b73ca64261771cd
|
[
"Apache-2.0"
] | null | null | null |
gwlfe/Input/Animals/TotAEU.py
|
mudkipmaster/gwlf-e
|
9e058445537dd32d1916f76c4b73ca64261771cd
|
[
"Apache-2.0"
] | 6
|
2018-07-24T22:46:28.000Z
|
2018-07-29T19:13:09.000Z
|
gwlfe/Input/Animals/TotAEU.py
|
mudkipmaster/gwlf-e
|
9e058445537dd32d1916f76c4b73ca64261771cd
|
[
"Apache-2.0"
] | 1
|
2018-07-24T18:22:01.000Z
|
2018-07-24T18:22:01.000Z
|
from numpy import sum
def TotAEU(NumAnimals, AvgAnimalWt):
result = 0
aeu1 = ((NumAnimals[2] / 2) * (AvgAnimalWt[2]) / 1000) + ((NumAnimals[3] / 2) * (AvgAnimalWt[3]) / 1000)
aeu2 = (NumAnimals[7] * AvgAnimalWt[7]) / 1000
aeu3 = (NumAnimals[5] * AvgAnimalWt[5]) / 1000
aeu4 = (NumAnimals[4] * AvgAnimalWt[4]) / 1000
aeu5 = (NumAnimals[6] * AvgAnimalWt[6]) / 1000
aeu6 = (NumAnimals[0] * AvgAnimalWt[0]) / 1000
aeu7 = (NumAnimals[1] * AvgAnimalWt[1]) / 1000
result += aeu1 + aeu2 + aeu3 + aeu4 + aeu5 + aeu6 + aeu7
return result
def TotAEU_f(NumAnimals, AvgAnimalWt):
aeu = NumAnimals * AvgAnimalWt / 1000
aeu[2:4] /= 2
return sum(aeu)
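# Equivalence sketch (hypothetical numbers): for 8-element inputs the loop-based
# TotAEU and the vectorised TotAEU_f should agree on the animal equivalent units.
if __name__ == '__main__':
    from numpy import array
    num_animals = array([10., 20., 30., 40., 50., 60., 70., 80.])
    avg_weight = array([1000.] * 8)
    assert abs(TotAEU(num_animals, avg_weight) - TotAEU_f(num_animals, avg_weight)) < 1e-9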
| 32.904762
| 108
| 0.615051
|
03a3005069812583b8c84663185e334bc549a38a
| 2,113
|
py
|
Python
|
models.py
|
meet-projects/MEET-site-aj
|
9a68c1e759ac228fe0276cf89c24a840f8d5e687
|
[
"MIT"
] | null | null | null |
models.py
|
meet-projects/MEET-site-aj
|
9a68c1e759ac228fe0276cf89c24a840f8d5e687
|
[
"MIT"
] | 4
|
2021-02-08T20:33:03.000Z
|
2021-12-13T20:02:37.000Z
|
models.py
|
meet-projects/MEET-site-aj
|
9a68c1e759ac228fe0276cf89c24a840f8d5e687
|
[
"MIT"
] | null | null | null |
from sqlalchemy import Column, Integer, ForeignKey, String, Enum, Boolean
from sqlalchemy.types import DateTime
from sqlalchemy.ext.declarative import declarative_base
import time
import enum
from werkzeug.security import generate_password_hash, \
check_password_hash
class Access(enum.Enum):
YearOne = 1
YearTwo = 2
YearThree = 3
Alum = 4
Admin = 5
SuperAdmin = 6
models_access = {
Access.YearOne: "y1",
Access.YearTwo: "y2",
Access.YearThree: "y3",
Access.Alum: "alum",
Access.Admin: "admin",
Access.SuperAdmin: "superadmin",
}
# above but reversed
access_models = {j:i for i, j in models_access.items()}
Base = declarative_base()
class User(Base):
__tablename__ = "users"
# id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String)
email = Column(String, primary_key=True, nullable=False)
password = Column(String)
access = Column(Enum(Access))
authenticated = Column(Boolean, default=False)
location = Column(String)
class Lecture(Base):
__tablename__ = "lectures"
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String)
link = Column(String)
group = Column(String)
assign_type = Column(String)
lec_type = Column(String, default="slides")
location = Column(String)
class Assignment(Base):
__tablename__ = "assignments"
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String)
group = Column(String)
document = Column(String)
due = Column(DateTime)
class CompAssignment(Base):
__tablename__ = "completed_assignments"
id = Column(Integer, primary_key=True, autoincrement=True)
student = Column(String, ForeignKey("users.email"))
link = Column(String)
completed = Column(DateTime)
class Announcement(Base):
__tablename__ = "announcements"
id = Column(Integer, primary_key=True, autoincrement=True)
announced = Column(DateTime)
name = Column(String)
text = Column(String)
poster = Column(String)
group = Column(String, nullable=True)
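# Setup sketch (an assumption, not part of the original module): binding these
# models to a database; the SQLite URL is a placeholder.
if __name__ == '__main__':
    from sqlalchemy import create_engine
    engine = create_engine('sqlite:///meet.db')
    Base.metadata.create_all(engine)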
| 25.457831
| 73
| 0.69806
|
d99502cae623ed43382414708a5bb4518ff1eab0
| 6,104
|
py
|
Python
|
qa/rpc-tests/proxy_test.py
|
MOBInodecoin/Mobinode
|
c4d3f0c97c653b0d2b7a2f66f376d7ea85077001
|
[
"MIT"
] | 3
|
2019-03-23T16:58:12.000Z
|
2019-11-13T08:46:52.000Z
|
qa/rpc-tests/proxy_test.py
|
MOBInodecoin/Mobinode
|
c4d3f0c97c653b0d2b7a2f66f376d7ea85077001
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/proxy_test.py
|
MOBInodecoin/Mobinode
|
c4d3f0c97c653b0d2b7a2f66f376d7ea85077001
|
[
"MIT"
] | 3
|
2019-02-26T20:41:05.000Z
|
2019-04-01T12:07:37.000Z
|
#!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import socket
import traceback, sys
from binascii import hexlify
import time, os
from socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework import BitcoinTestFramework
from util import *
'''
Test plan:
- Start bitcoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on bitcoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side:
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create bitcoinds that connect to them
- Manipulate the bitcoinds using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
'''
class ProxyTest(BitcoinTestFramework):
def __init__(self):
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', 13000 + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', 14000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', 15000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
def setup_nodes(self):
# Note: proxies are not used to connect to local nodes
# this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
return start_nodes(4, self.options.tmpdir, extra_args=[
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0']
])
def node_test(self, node, proxies, auth):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing onion connection through node
node.addnode("mobinodevj7kcklujarx.onion:12219", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "mobinodevj7kcklujarx.onion")
assert_equal(cmd.port, 12219)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:8333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, "node.noumenon")
assert_equal(cmd.port, 8333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), 4)
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False)
if __name__ == '__main__':
ProxyTest().main()
| 41.808219
| 145
| 0.653342
|
128a3711119bc71fefa4c8cbf3878083475f2e1f
| 28,947
|
py
|
Python
|
core/people/person.py
|
vsilent/smarty-bot
|
963cba05433be14494ba339343c9903ccab3c37d
|
[
"MIT"
] | 1
|
2016-10-08T09:01:05.000Z
|
2016-10-08T09:01:05.000Z
|
core/people/person.py
|
vsilent/smarty-bot
|
963cba05433be14494ba339343c9903ccab3c37d
|
[
"MIT"
] | 1
|
2019-09-24T09:56:52.000Z
|
2019-09-24T09:56:52.000Z
|
core/people/person.py
|
vsilent/smarty-bot
|
963cba05433be14494ba339343c9903ccab3c37d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sqlalchemy import create_engine, Table
from sqlalchemy import Column, Integer, String, DateTime, Date
from sqlalchemy import MetaData, UnicodeText, text, Index
from sqlalchemy.orm import mapper, sessionmaker, scoped_session
#from sqlalchemy.sql import select
import uuid
import hashlib
from core.config.settings import logger
from core.config.settings import DB
import datetime
engine = create_engine(DB, echo=False, pool_recycle=3600)
metadata = MetaData()
db_session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
#metadata = MetaData(bind=engine, reflect=True)
account_table = Table(
'account',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(36)),
Column('password', String(200)),
mysql_engine='InnoDB',
mysql_charset='utf8')
profile_table = Table(
'account_profile',
metadata,
Column('id', Integer, primary_key=True),
Column('account_id', String(36), primary_key=True),
Column('uuid', String(255)),
Column('email', String(255), nullable=True),
Column('first_name', String(255), nullable=True),
Column('last_name', String(255), nullable=True),
Column('birthdate', Date, nullable=True),
Column('birthplace', String(255), nullable=True),
Column('nationality', String(255)),
Column('nickname', String(24), nullable=True),
Column('gender', String(255), nullable=True),
Column(
'priority', String(11),
nullable=True,
default=text(u"'0'"),
server_default=text('NULL')),
Column('type', String(8), nullable=True),
Column('age', Integer(), nullable=True, default=text(u"'0'")),
Column('sign', String(11), nullable=True),
Column('first_login', Integer(), nullable=True, default=text(u"'0'")),
Column('last_login', Integer(), nullable=True, default=text(u"'0'")),
Column('registered', Integer(), nullable=True, default=text(u"'0'")),
Column('updated', Integer(), nullable=True, default=text(u"'0'")),
Column('homepage', String(255), nullable=True),
Column('home_country', String(255), nullable=True),
Column('home_city', String(255), nullable=True),
Column('home_state', String(255), nullable=True),
Column('home_street', String(255), nullable=True),
Column('home_house', String(255), nullable=True),
Column('home_apartment', String(255), nullable=True),
Column('home_postcode', String(255), nullable=True),
Column('home_phone', String(255), nullable=True),
Column('work_country', String(255), nullable=True),
Column('work_city', String(255), nullable=True),
Column('work_street', String(255), nullable=True),
Column('work_house', String(255), nullable=True),
Column('work_postcode', String(255), nullable=True),
Column('work_phone', String(255), nullable=True),
Column('mobile_phone', String(255), nullable=True),
Column('music', String(16), nullable=True),
Column('food', String(16), nullable=True),
Column('drink', String(16), nullable=True),
Column('location_id', Integer(), nullable=True),
Column('status', String(255), nullable=True),
Column('online', Integer(), nullable=True),
mysql_engine='MyISAM',
mysql_charset='utf8'
)
Index('id', profile_table.c.id, unique=True)
Index('uuid', profile_table.c.uuid, unique=True)
Index('email', profile_table.c.email, unique=True)
Index('first_name', profile_table.c.first_name)
Index('last_name', profile_table.c.last_name)
Index('nickname', profile_table.c.nickname)
Index('homepage', profile_table.c.homepage)
Index('mobile_phone', profile_table.c.mobile_phone, unique=False)
Index('location_id', profile_table.c.location_id)
profile_role_table = Table(
'account_profile_role',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(255)),
Column('role', String(255)),
mysql_engine='MyISAM',
mysql_charset='utf8')
Index('id', profile_role_table.c.id)
Index('uuid', profile_role_table.c.uuid)
Index('role', profile_role_table.c.role)
profile_social_table = Table(
'account_profile_social',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(255)),
Column('service_id', UnicodeText()),
Column('service_name', String(255)),
Column('service_url', UnicodeText()),
Column('service_consumer_key', UnicodeText()),
Column('service_consumer_secret', UnicodeText()),
Column('service_access_token', UnicodeText()),
Column('service_scope', UnicodeText()),
Column('service_login', UnicodeText()),
Column('service_email', String(255)),
Column('service_password', UnicodeText()),
Column('notes', UnicodeText()),
mysql_engine='MyISAM',
mysql_charset='utf8')
Index('id', profile_social_table.c.id)
Index('uuid', profile_social_table.c.uuid)
profile_interest_table = Table(
'account_interest_email',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(255)),
Column('interest', UnicodeText()),
mysql_engine='MyISAM',
mysql_charset='utf8'
)
Index('id', profile_interest_table.c.id)
Index('uuid', profile_interest_table.c.uuid)
profile_request_table = Table(
'account_profile_request',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(255)),
Column('request', String(255)),
mysql_engine='MyISAM',
mysql_charset='utf8'
)
Index('id', profile_request_table.c.id)
Index('uuid', profile_request_table.c.uuid)
profile_comment_table = Table(
'account_profile_comment',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(255)),
Column('comment', UnicodeText()),
mysql_engine='MyISAM',
mysql_charset='utf8'
)
Index('id', profile_comment_table.c.id)
Index('uuid', profile_comment_table.c.uuid)
profile_relation_table = Table(
'account_profile_relation',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(255)),
Column('type', String(255)),
Column('related_account', UnicodeText()),
Column('related_account_type', String(255)),
mysql_engine='MyISAM',
mysql_charset='utf8')
Index('id', profile_relation_table.c.id)
Index('uuid', profile_relation_table.c.uuid)
profile_other_name_table = Table(
'account_profile_other_name',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(255)),
Column('name', String(255)),
mysql_engine='MyISAM',
mysql_charset='utf8'
)
Index('id', profile_other_name_table.c.id)
Index('uuid', profile_other_name_table.c.uuid)
Index('name', profile_other_name_table.c.name)
profile_email_table = Table(
'account_profile_email',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(255)),
Column('email', String(255)),
mysql_engine='MyISAM',
mysql_charset='utf8'
)
Index('id', profile_email_table.c.id)
Index('uuid', profile_email_table.c.uuid)
Index('email', profile_email_table.c.email)
profile_picture_table = Table(
'account_profile_picture',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(255)),
Column('picture', String(255)),
mysql_engine='MyISAM',
mysql_charset='utf8'
)
Index('id', profile_picture_table.c.id)
Index('uuid', profile_picture_table.c.uuid)
profile_phone_table = Table(
'account_profile_phone',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(255)),
Column('phone', String(255)),
mysql_engine='MyISAM',
mysql_charset='utf8')
Index('id', profile_phone_table.c.id)
Index('uuid', profile_phone_table.c.uuid)
profile_cronjob_table = Table(
'account_profile_cronjob',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(255)),
Column('job', String(255)),
mysql_engine='MyISAM',
mysql_charset='utf8')
Index('id', profile_cronjob_table.c.id)
Index('uuid', profile_cronjob_table.c.uuid)
profile_device_table = Table(
'account_profile_device',
metadata,
Column('id', Integer, primary_key=True),
Column('device_id', Integer()),
mysql_engine='MyISAM',
mysql_charset='utf8'
)
Index('id', profile_device_table.c.id, unique=True)
Index('device_id', profile_device_table.c.device_id, unique=True)
device_table = Table(
'device',
metadata,
Column('id', Integer, primary_key=True),
Column('serial', String(255), nullable=True),
Column('name', String(255)),
Column('type', String(255), nullable=True),
Column('model', String(255), nullable=True),
Column('make', String(255), nullable=True),
Column('built', String(255), nullable=True),
Column('family', String(255), nullable=True),
Column('desc', UnicodeText(), nullable=True),
Column('params', UnicodeText(), nullable=True),
Column('network_name', String(255), nullable=True),
Column('mac_address', String(255), nullable=True),
Column('location', String(255), nullable=True),
mysql_engine='MyISAM',
mysql_charset='utf8')
Index('id', device_table.c.id, unique=True)
Index('serial', device_table.c.serial, unique=True)
Index('name', device_table.c.name, unique=True)
Index('model', device_table.c.model, unique=True)
Index('mac_address', device_table.c.mac_address, unique=True)
country_table = Table(
'country',
metadata,
Column('id', Integer(), primary_key=True, nullable=False),
Column('iso', String(2), nullable=False),
Column('name', String(80), nullable=False),
Column('title', String(80), nullable=False),
Column('iso3', String(3)),
Column('numcode', Integer()),
mysql_engine='MyISAM',
mysql_charset='utf8')
profile_link_table = Table(
'account_profile_link',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(255)),
Column('type', String(255), nullable=True),
Column('url', UnicodeText()),
mysql_engine='MyISAM',
mysql_charset='utf8')
Index('id', profile_link_table.c.id, unique=True)
#Index('uuid', profile_link_table.c.uuid, unique=True)
#needed for registering working hours
profile_timesheet_table = Table(
'account_profile_timesheet',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid', String(255)),
Column('type', String(255), nullable=True),
Column('created', DateTime, default=datetime.datetime.now),
Column('spent', UnicodeText()),
mysql_engine='MyISAM',
mysql_charset='utf8')
Index('id', profile_timesheet_table.c.id, unique=True)
Index('uuid', profile_timesheet_table.c.uuid, unique=True)
company_table = Table(
'company_profile',
metadata,
Column('id', Integer, primary_key=True),
Column('name', String(255), nullable=False),
Column('domain', String(255), nullable=True),
Column('type', String(255), nullable=True),
Column('registered', DateTime, default=datetime.datetime.now),
Column('updated', DateTime, default=datetime.datetime.now),
Column('founded', Integer(), nullable=True, default=text(u"'0'")),
Column('homepage', String(255), nullable=True),
Column('country', String(255), nullable=True),
Column('city', String(255), nullable=True),
Column('state', String(255), nullable=True),
Column('street', String(255), nullable=True),
Column('house', String(255), nullable=True),
Column('postcode', String(255), nullable=True),
Column('phone', String(255), nullable=True),
Column('fax', String(255), nullable=True),
Column('location_id', Integer(), nullable=True),
Column('info', UnicodeText(), nullable=True),
Column('status', String(255), nullable=True),
mysql_engine='MyISAM',
mysql_charset='utf8'
)
Index('id', company_table.c.id, unique=True)
Index('name', company_table.c.name)
Index('domain', company_table.c.domain, unique=False)
company_member_table = Table(
'company_member_profile',
metadata,
Column('id', Integer, primary_key=True),
Column('company_id', String(255)),
Column('uuid', String(255)),
Column('email1', String(255), nullable=True),
Column('email2', String(255), nullable=True),
Column('registered', DateTime, default=datetime.datetime.now),
Column('updated', DateTime, default=datetime.datetime.now),
Column('homepage', String(255), nullable=True),
Column('country', String(255), nullable=True),
Column('city', String(255), nullable=True),
Column('state', String(255), nullable=True),
Column('street', String(255), nullable=True),
Column('office', String(255), nullable=True),
Column('postcode', String(255), nullable=True),
Column('phone1', String(255), nullable=True),
Column('phone2', String(255), nullable=True),
Column('phone3', String(255), nullable=True),
Column('cellphone1', String(255), nullable=True),
Column('cellphone2', String(255), nullable=True),
Column('cellphone3', String(255), nullable=True),
Column('fax1', String(255), nullable=True),
Column('fax2', String(255), nullable=True),
Column('profession', UnicodeText(), nullable=True),
Column('info', UnicodeText(), nullable=True),
Column('location_id', Integer(), nullable=True),
Column('status', String(255), nullable=True),
mysql_engine='MyISAM',
mysql_charset='utf8'
)
Index('id', company_member_table.c.id, unique=True)
Index('company_id', company_member_table.c.company_id, unique=True)
Index('email1', company_member_table.c.email1, unique=False)
profile_interaction_table = Table(
'profile_interaction_history',
metadata,
Column('id', Integer, primary_key=True),
Column('uuid1', String(255)),
Column('uuid2', String(255)),
Column('email1', String(255), nullable=True),
Column('email2', String(255), nullable=True),
Column('registered', DateTime, default=datetime.datetime.now),
Column('updated', DateTime, default=datetime.datetime.now),
Column('type', String(255), nullable=True),
Column('data', UnicodeText(), nullable=True),
Column('status', String(255), nullable=True),
mysql_engine='MyISAM',
mysql_charset='utf8'
)
Index('id', profile_interaction_table.c.id, unique=True)
Index('uuid1', profile_interaction_table.c.uuid1, unique=True)
Index('uuid2', profile_interaction_table.c.uuid2, unique=True)
Index('email1', profile_interaction_table.c.email1, unique=False)
Index('email2', profile_interaction_table.c.email2, unique=False)
Index('type', profile_interaction_table.c.type, unique=False)
class ProfileTimesheet(object):
def __init__(self, **kwargs):
self.uuid = kwargs.pop('uuid')
self.type = kwargs.pop('type')
self.created = kwargs.pop('created')
self.spent = kwargs.pop('spent')
def __repr__(self):
return "<ProfileTimesheet('%s')>" % (self.id)
class ProfileLink(object):
def __init__(self, **kwargs):
self.uuid = kwargs.pop('uuid')
self.type = kwargs.pop('type')
self.url = kwargs.pop('url')
def __repr__(self):
return "<ProfileLink('%s')>" % (self.url)
class Country(object):
def __init__(self, **kwargs):
self.id = kwargs.pop('id', '')
self.iso = kwargs.pop('iso', '')
self.name = kwargs.pop('name', '')
self.title = kwargs.pop('title', '')
self.iso3 = kwargs.pop('iso3', '')
self.numcode = kwargs.pop('numcode', '')
def __repr__(self):
return "<Country('%s')>" % (self.iso)
class Account(object):
def __init__(self, **kwargs):
#self.id = kwargs.pop('id', '')
self.uuid = kwargs.pop('uuid', '')
self.password = kwargs.pop('password', '')
def __repr__(self):
return "<Account('%s', '%s')>" % (self.id, self.uuid)
class Profile(object):
query = db_session.query_property()
def __init__(self, **kwargs):
self.uuid = kwargs.pop('uuid')
self.email = kwargs.pop('email', '')
self.first_name = kwargs.pop('first_name', '')
self.last_name = kwargs.pop('last_name', '')
self.birthdate = kwargs.pop('birthdate', 0)
self.birthplace = kwargs.pop('birthplace', '')
self.nationality = kwargs.pop('nationality', '')
self.nickname = kwargs.pop('nickname', '')
self.gender = kwargs.pop('gender', '')
self.priority = kwargs.pop('priority', '')
self.type = kwargs.pop('type', '')
self.age = kwargs.pop('age', 0)
self.sign = kwargs.pop('sign', '')
self.first_login = kwargs.pop('first_login', '')
self.last_login = kwargs.pop('last_login', '')
self.registered = kwargs.pop('registered', 0)
self.updated = kwargs.pop('updated', 0)
self.homepage = kwargs.pop('homepage', '')
self.home_country = kwargs.pop('home_country', '')
self.home_city = kwargs.pop('home_city', '')
self.home_state = kwargs.pop('home_state', '')
self.home_street = kwargs.pop('home_street', '')
self.home_house = kwargs.pop('home_house', '')
self.home_apartment = kwargs.pop('home_apartment', '')
self.home_postcode = kwargs.pop('home_postcode', '')
self.home_phone = kwargs.pop('home_phone', '')
self.work_country = kwargs.pop('work_country', '')
self.work_city = kwargs.pop('work_city', '')
self.work_street = kwargs.pop('work_street', '')
self.work_house = kwargs.pop('work_house', '')
self.work_postcode = kwargs.pop('work_postcode', '')
self.work_phone = kwargs.pop('work_phone', '')
self.mobile_phone = kwargs.pop('mobile_phone', '')
self.food = kwargs.pop('food', '')
self.drink = kwargs.pop('drink', '')
self.music = kwargs.pop('music', '')
self.status = kwargs.pop('status', '')
self.online = kwargs.pop('online', 0)
def __repr__(self):
return "<Profile('%s'')>" % (self.email)
class ProfileRole(object):
def __init__(self, **kwargs):
self.uuid = kwargs.pop('uuid')
self.role = kwargs.pop('role')
def __repr__(self):
return "<ProfileRole('%s'')>" % (self.device_id)
class ProfileSocial(object):
def __init__(self, **kwargs):
self.uuid = kwargs.pop('uuid')
self.service_id = kwargs.pop('service_id')
self.service_name = kwargs.pop('service_name')
self.service_url = kwargs.pop('service_url', '')
self.service_consumer_key = kwargs.pop('service_consumer_key', '')
self.service_consumer_secret = kwargs.pop(
'service_consumer_secret', ''
)
self.service_access_token = kwargs.pop('service_access_token', '')
self.service_scope = kwargs.pop('service_scope', '')
self.service_login = kwargs.pop('service_login', '')
self.service_email = kwargs.pop('service_email', '')
self.service_password = kwargs.pop('service_password', '')
self.notes = kwargs.pop('notes', '')
def __repr__(self):
return "<ProfileSocial('%s'')>" % (self.uuid)
class ProfileOtherName(object):
def __init__(self, **kwargs):
self.uuid = kwargs.pop('uuid')
self.name = kwargs.pop('name')
def __repr__(self):
return "<ProfileOtherName('%s'')>" % (self.name)
class ProfileRelation(object):
def __init__(self, **kwargs):
self.uuid = kwargs.pop('uuid')
self.type = kwargs.pop('type')
self.related_account = kwargs.pop('related_account')
self.related_account_type = kwargs.pop('related_account_type')
def __repr__(self):
return "<ProfileSocial('%s'')>" % (self.uuid)
class ProfileEmail(object):
def __init__(self, **kwargs):
self.uuid = kwargs.pop('uuid')
self.email = kwargs.pop('email')
def __repr__(self):
return "<ProfileEmail('%s'')>" % (self.email)
class ProfilePicture(object):
def __init__(self, **kwargs):
self.uuid = kwargs.pop('uuid')
self.picture = kwargs.pop('picture')
def __repr__(self):
return "<ProfilePicture('%s'')>" % (self.uuid)
class ProfilePhone(object):
def __init__(self, **kwargs):
self.uuid = kwargs.pop('uuid')
self.phone = kwargs.pop('phone')
def __repr__(self):
return "<ProfilePhone('%s'')>" % (self.uuid)
class ProfileInterest(object):
def __init__(self, **kwargs):
self.uuid = kwargs.pop('uuid')
self.interest = kwargs.pop('interest')
def __repr__(self):
return "<ProfileInterest('%s'')>" % (self.uuid)
class ProfileRequest(object):
def __init__(self, **kwargs):
self.uuid = kwargs.pop('uuid')
self.type = kwargs.pop('type')
self.request = kwargs.pop('request')
def __repr__(self):
return "<ProfileRequest('%s'')>" % (self.uuid)
class ProfileDevice(object):
def __init__(self, **kwargs):
self.uuid = kwargs.pop('uuid')
self.device_id = kwargs.pop('device_id')
def __repr__(self):
return "<ProfileDevice('%s'')>" % (self.device_id)
class Device(object):
def __init__(self, **kwargs):
self.id = kwargs.pop('id')
self.device_name = kwargs.pop('device_name')
self.device_desc = kwargs.pop('device_desc')
self.device_type = kwargs.pop('device_type')
self.device_family = kwargs.pop('device_family')
self.device_model = kwargs.pop('device_model')
self.device_serial = kwargs.pop('device_serial')
self.device_make = kwargs.pop('device_make')
self.device_built = kwargs.pop('device_built')
self.device_params = kwargs.pop('device_params')
self.device_location = kwargs.pop('device_location')
def __repr__(self):
return "<Device('%s'')>" % (self.device_name)
class ProfileComment(object):
def __init__(self, **kwargs):
self.uuid = kwargs.pop('uuid')
self.comment = kwargs.pop('comment')
self.area = kwargs.pop('area')
def __repr__(self):
return "<ProfileComment('%s'')>" % (self.comment)
class ProfileCronjob(object):
def __init__(self, **kwargs):
self.uuid = kwargs.pop('uuid')
self.job = kwargs.pop('job')
def __repr__(self):
return "<ProfileComment('%s'')>" % (self.comment)
class Company(object):
def __init__(self, **kwargs):
self.id = kwargs.pop('id')
self.uuid = kwargs.pop('uuid')
self.name = kwargs.pop('name', '')
self.domain = kwargs.pop('domain', '')
self.type = kwargs.pop('type', '')
self.registered = kwargs.pop('registered', 0)
self.founded = kwargs.pop('founded', 0)
self.updated = kwargs.pop('updated', 0)
self.homepage = kwargs.pop('homepage', '')
self.country = kwargs.pop('country', '')
self.city = kwargs.pop('city', '')
self.state = kwargs.pop('state', '')
self.street = kwargs.pop('street', '')
self.house = kwargs.pop('house', '')
self.postcode = kwargs.pop('postcode', '')
self.phone = kwargs.pop('phone', '')
self.fax = kwargs.pop('fax', '')
self.location_id = kwargs.pop('location_id', '')
self.status = kwargs.pop('status', '')
def __repr__(self):
return "<Company('%s'')>" % (self.email)
class CompanyMember(object):
def __init__(self, **kwargs):
self.id = kwargs.pop('id')
self.uuid = kwargs.pop('uuid', '')
self.company_id = kwargs.pop('company_id', '')
self.email1 = kwargs.pop('email1', '')
self.email2 = kwargs.pop('email2', '')
self.registered = kwargs.pop('registered', 0)
self.updated = kwargs.pop('updated', 0)
self.homepage = kwargs.pop('homepage', '')
self.country = kwargs.pop('country', '')
self.city = kwargs.pop('city', '')
self.state = kwargs.pop('state', '')
self.street = kwargs.pop('street', '')
self.office = kwargs.pop('office', '')
self.postcode = kwargs.pop('postcode', '')
self.cellphone1 = kwargs.pop('cellphone1', '')
self.cellphone2 = kwargs.pop('cellphone2', '')
self.cellphone3 = kwargs.pop('cellphone3', '')
self.phone1 = kwargs.pop('phone1', '')
self.phone2 = kwargs.pop('phone2', '')
self.phone3 = kwargs.pop('phone3', '')
self.fax1 = kwargs.pop('fax1', '')
self.fax2 = kwargs.pop('fax2', '')
self.profession = kwargs.pop('profession', '')
self.info = kwargs.pop('info', '')
self.location_id = kwargs.pop('location_id', '')
self.status = kwargs.pop('status', '')
def __repr__(self):
return "<CompanyMember('%s'')>" % (self.uuid)
class ObjectLocation(object):
def __init__(self, **kwargs):
self.id = kwargs.pop('id')
self.obj_id = kwargs.pop('obj_id')
self.coord = kwargs.pop('coord', '')
class ProfileInteractionHistory(object):
def __init__(self, **kwargs):
self.id = kwargs.pop('id')
self.uuid1 = kwargs.pop('uuid1', '')
self.uuid2 = kwargs.pop('uuid2', '')
self.email1 = kwargs.pop('email1', '')
self.email2 = kwargs.pop('email2', '')
self.type = kwargs.pop('type', '')
self.data = kwargs.pop('data', '')
self.registered = kwargs.pop('registered', 0)
self.updated = kwargs.pop('updated', 0)
Session = sessionmaker(bind=engine)
#metadata = MetaData(bind=engine, reflect=True)
mapper(Account, account_table)
mapper(Profile, profile_table)
mapper(ProfileRole, profile_role_table)
mapper(ProfileEmail, profile_email_table)
mapper(ProfilePhone, profile_phone_table)
mapper(ProfilePicture, profile_picture_table)
mapper(ProfileSocial, profile_social_table)
mapper(ProfileInterest, profile_interest_table)
mapper(ProfileRequest, profile_request_table)
mapper(ProfileComment, profile_comment_table)
mapper(ProfileRelation, profile_relation_table)
mapper(ProfileOtherName, profile_other_name_table)
mapper(ProfileDevice, profile_device_table)
mapper(ProfileCronjob, profile_cronjob_table)
mapper(ProfileLink, profile_link_table)
mapper(Device, device_table)
mapper(Country, country_table)
mapper(ProfileTimesheet, profile_timesheet_table)
mapper(Company, company_table)
mapper(CompanyMember, company_member_table)
mapper(ProfileInteractionHistory, profile_interaction_table)
metadata.create_all(engine)
def update_list_from_jabber(_dict):
"""docstring for update_list_from_jabber"""
sess = Session()
profile = {}
try:
for u in _dict:
if u.startswith('private-chat'):
continue
profile['email'] = u
exists = sess.query(Profile).filter(Profile.email == u).all()
#logger.info('u %s' % type(u))
#logger.info('u %s' % u.encode('utf-8'))
#logger.info('exists %s' % exists)
if not exists:
logger.info('Updating %s ' % u)
us = {}
us['uuid'] = uuid.uuid4()
keystring = 'default'
salt = 'smarty-bot'
hash = hashlib.md5(salt + keystring).hexdigest()
us['password'] = hash
user = Account(**us)
sess.add(user)
sess.flush()
logger.info('uuid: %s saved with id: %s' % (
user.uuid, user.id))
profile['uuid'] = user.uuid
p = Profile(**profile)
sess.add(p)
sess.commit()
except Exception as e:
sess.rollback()
logger.exception(e)
def save_profile_property(uuid, prop, value):
"""docstring for save_property"""
sess = Session()
c = sess.query(Profile).filter(Profile.uuid == uuid)
c.update({prop: value})
logger.info('profile %s updated with: %s' % (prop, value))
sess.commit()
def add_profile(profile):
"""docstring for new profile"""
sess = Session()
try:
us = {}
us['uuid'] = uuid.uuid4()
keystring = 'default'
salt = 'smarty-bot'
hash = hashlib.md5(salt + keystring).hexdigest()
us['password'] = hash
user = Account(**us)
sess.add(user)
sess.flush()
logger.info('uuid: %s saved with id: %s' % (user.uuid, user.id))
profile['uuid'] = user.uuid
p = Profile(**profile)
sess.add(p)
sess.commit()
except Exception as e:
sess.rollback()
logger.exception(e)
def truncate_all(meta):
"""docstring for truncate_all"""
import contextlib
#for table in reversed(meta.Base.metadata.sorted_tables):
#meta.Session.execute(table.delete());
#meta.Session.commit()
with contextlib.closing(engine.connect()) as con:
trans = con.begin()
for table in reversed(meta.sorted_tables):
con.execute(table.delete())
trans.commit()
#truncates fine!
#truncate_all(metadata)
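# Usage sketch (hypothetical values): add_profile() creates the account row first,
# then the linked profile; the email and names below are placeholders.
if __name__ == '__main__':
    add_profile({'email': 'jane@example.com', 'first_name': 'Jane', 'last_name': 'Doe'})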
| 34.297393
| 74
| 0.643003
|
db115fbfdcc18b60caed595d87ea78a4a2a420f9
| 4,233
|
py
|
Python
|
tests/gcp/operators/test_text_to_speech.py
|
suensummit/airflow
|
37a342d0e96a91ce2d34085e225a4e86f54c4e21
|
[
"Apache-2.0"
] | 1
|
2017-06-25T14:18:15.000Z
|
2017-06-25T14:18:15.000Z
|
tests/gcp/operators/test_text_to_speech.py
|
suensummit/airflow
|
37a342d0e96a91ce2d34085e225a4e86f54c4e21
|
[
"Apache-2.0"
] | 3
|
2020-07-07T20:39:24.000Z
|
2021-09-29T17:34:46.000Z
|
tests/gcp/operators/test_text_to_speech.py
|
suensummit/airflow
|
37a342d0e96a91ce2d34085e225a4e86f54c4e21
|
[
"Apache-2.0"
] | 1
|
2020-11-04T03:17:51.000Z
|
2020-11-04T03:17:51.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from parameterized import parameterized
from airflow import AirflowException
from airflow.gcp.operators.text_to_speech import GcpTextToSpeechSynthesizeOperator
from tests.compat import ANY, Mock, PropertyMock, patch
PROJECT_ID = "project-id"
GCP_CONN_ID = "gcp-conn-id"
INPUT = {"text": "text"}
VOICE = {"language_code": "en-US"}
AUDIO_CONFIG = {"audio_encoding": "MP3"}
TARGET_BUCKET_NAME = "target_bucket_name"
TARGET_FILENAME = "target_filename"
class TestGcpTextToSpeech(unittest.TestCase):
@patch("airflow.gcp.operators.text_to_speech.GoogleCloudStorageHook")
@patch("airflow.gcp.operators.text_to_speech.GCPTextToSpeechHook")
def test_synthesize_text_green_path(self, mock_text_to_speech_hook, mock_gcp_hook):
mocked_response = Mock()
type(mocked_response).audio_content = PropertyMock(return_value=b"audio")
mock_text_to_speech_hook.return_value.synthesize_speech.return_value = mocked_response
mock_gcp_hook.return_value.upload.return_value = True
GcpTextToSpeechSynthesizeOperator(
project_id=PROJECT_ID,
gcp_conn_id=GCP_CONN_ID,
input_data=INPUT,
voice=VOICE,
audio_config=AUDIO_CONFIG,
target_bucket_name=TARGET_BUCKET_NAME,
target_filename=TARGET_FILENAME,
task_id="id",
).execute(context={"task_instance": Mock()})
mock_text_to_speech_hook.assert_called_once_with(gcp_conn_id="gcp-conn-id")
mock_gcp_hook.assert_called_once_with(google_cloud_storage_conn_id="gcp-conn-id")
mock_text_to_speech_hook.return_value.synthesize_speech.assert_called_once_with(
input_data=INPUT, voice=VOICE, audio_config=AUDIO_CONFIG, retry=None, timeout=None
)
mock_gcp_hook.return_value.upload.assert_called_once_with(
bucket_name=TARGET_BUCKET_NAME, object_name=TARGET_FILENAME, filename=ANY
)
@parameterized.expand(
[
("input_data", "", VOICE, AUDIO_CONFIG, TARGET_BUCKET_NAME, TARGET_FILENAME),
("voice", INPUT, "", AUDIO_CONFIG, TARGET_BUCKET_NAME, TARGET_FILENAME),
("audio_config", INPUT, VOICE, "", TARGET_BUCKET_NAME, TARGET_FILENAME),
("target_bucket_name", INPUT, VOICE, AUDIO_CONFIG, "", TARGET_FILENAME),
("target_filename", INPUT, VOICE, AUDIO_CONFIG, TARGET_BUCKET_NAME, ""),
]
)
@patch("airflow.gcp.operators.text_to_speech.GoogleCloudStorageHook")
@patch("airflow.gcp.operators.text_to_speech.GCPTextToSpeechHook")
def test_missing_arguments(
self,
missing_arg,
input_data,
voice,
audio_config,
target_bucket_name,
target_filename,
mock_text_to_speech_hook,
mock_gcp_hook,
):
with self.assertRaises(AirflowException) as e:
GcpTextToSpeechSynthesizeOperator(
project_id="project-id",
input_data=input_data,
voice=voice,
audio_config=audio_config,
target_bucket_name=target_bucket_name,
target_filename=target_filename,
task_id="id",
).execute(context={"task_instance": Mock()})
err = e.exception
self.assertIn(missing_arg, str(err))
mock_text_to_speech_hook.assert_not_called()
mock_gcp_hook.assert_not_called()
| 40.701923
| 94
| 0.704465
|
e8b339bf3d4666cc1e7391727bbfbfd39ea24d5f
| 2,333
|
py
|
Python
|
tcpserver/socketserver/server2.py
|
baixuexue123/performance
|
154aed84ec7a098cb00ae64c996a128266dc56fc
|
[
"MIT"
] | 1
|
2019-06-06T11:01:37.000Z
|
2019-06-06T11:01:37.000Z
|
tcpserver/socketserver/server2.py
|
baixuexue123/performance
|
154aed84ec7a098cb00ae64c996a128266dc56fc
|
[
"MIT"
] | null | null | null |
tcpserver/socketserver/server2.py
|
baixuexue123/performance
|
154aed84ec7a098cb00ae64c996a128266dc56fc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
nonblocking with select
"""
import errno
import select
import socket
read_handlers = {}
write_handlers = {}
def call_handlers(handlers, fds):
for fd in fds:
try:
handlers[fd]()
except IOError as e:
if e.errno in (errno.EAGAIN, errno.EWOULDBLOCK):
continue
except KeyError:
pass
def loop():
while True:
reads, writes, x = select.select(read_handlers.keys(), write_handlers.keys(), [])
call_handlers(read_handlers, reads)
call_handlers(write_handlers, writes)
class ServerHandler(object):
def __init__(self, sock):
sock.setblocking(0)
self.sock = sock
read_handlers[sock.fileno()] = self.on_readable
def on_readable(self):
while True:
conn, _ = self.sock.accept()
EchoHandler(conn)
class EchoHandler(object):
def __init__(self, sock):
sock.setblocking(0)
self.sock = sock
self.buf = []
read_handlers[sock.fileno()] = self.on_readable
def _update(self):
if self.buf:
write_handlers[self.sock.fileno()] = self.on_writable
else:
write_handlers.pop(self.sock.fileno(), None)
def close(self):
fd = self.sock.fileno()
read_handlers.pop(fd, None)
write_handlers.pop(fd, None)
self.sock.close()
self.buf = []
def on_readable(self):
try:
while True:
data = self.sock.recv(4096)
if not data:
self.close()
return
self.buf.append(data)
finally:
self._update()
def on_writable(self):
try:
while self.buf:
data = self.buf[0]
sent = self.sock.send(data)
data = data[sent:]
if not data:
self.buf.pop(0)
else:
self.buf[0] = data
finally:
self._update()
def serve(addr):
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(addr)
sock.listen(50)
ServerHandler(sock)
loop()
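# Client sketch (an assumption, not part of the original module): a plain blocking
# client that exercises the echo server above; address and payload are illustrative.
def example_client(addr=('127.0.0.1', 4000), payload=b'hello'):
    client = socket.create_connection(addr)
    try:
        client.sendall(payload)
        return client.recv(4096)  # the server echoes the payload back
    finally:
        client.close()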
if __name__ == '__main__':
serve(('0.0.0.0', 4000))
| 23.09901
| 89
| 0.536219
|
119fd9a6ac00b72d016f99eeb2c58a76f614f760
| 11,309
|
py
|
Python
|
src/sima/riflex/blueprints/axisymmetriccrosssection.py
|
SINTEF/simapy
|
650b8c2f15503dad98e2bfc0d0788509593822c7
|
[
"MIT"
] | null | null | null |
src/sima/riflex/blueprints/axisymmetriccrosssection.py
|
SINTEF/simapy
|
650b8c2f15503dad98e2bfc0d0788509593822c7
|
[
"MIT"
] | null | null | null |
src/sima/riflex/blueprints/axisymmetriccrosssection.py
|
SINTEF/simapy
|
650b8c2f15503dad98e2bfc0d0788509593822c7
|
[
"MIT"
] | null | null | null |
#
# Generated with AxisymmetricCrossSectionBlueprint
from dmt.blueprint import Blueprint
from dmt.dimension import Dimension
from dmt.attribute import Attribute
from dmt.enum_attribute import EnumAttribute
from dmt.blueprint_attribute import BlueprintAttribute
from .crosssection import CrossSectionBlueprint
from .crsaxialfrictionmodel import CRSAxialFrictionModelBlueprint
from .timedomainvivloadcoefficients import TimeDomainVIVLoadCoefficientsBlueprint
class AxisymmetricCrossSectionBlueprint(CrossSectionBlueprint,CRSAxialFrictionModelBlueprint,TimeDomainVIVLoadCoefficientsBlueprint):
""""""
def __init__(self, name="AxisymmetricCrossSection", package_path="sima/riflex", description=""):
super().__init__(name,package_path,description)
self.attributes.append(Attribute("name","string","",default=""))
self.attributes.append(Attribute("description","string","",default=""))
self.attributes.append(Attribute("_id","string","",default=""))
self.attributes.append(BlueprintAttribute("scriptableValues","sima/sima/ScriptableValue","",True,Dimension("*")))
self.attributes.append(Attribute("staticFriction","number","Static friction force corresponding to elongation",default=0.0))
self.attributes.append(Attribute("staticElongation","number","Relative elongation",default=0.0))
self.attributes.append(Attribute("dynamicFriction","number","Dynamic friction force corresponding to elongation",default=0.0))
self.attributes.append(Attribute("dynamicElongation","number","Relative elongation",default=0.0))
self.attributes.append(Attribute("axialFriction","boolean","Local axial friction model",default=False))
self.attributes.append(EnumAttribute("vivLoadFormulation","sima/riflex/VIVLoadFormulation",""))
self.attributes.append(Attribute("cv","number","Vortex shedding load coefficient for cross-flow excitation (nondimensional)",default=0.0))
self.attributes.append(Attribute("fnull","number","Natural cross-flow vortex shedding frequency (nondimensional)",default=0.0))
self.attributes.append(Attribute("fmin","number","Minimum cross-flow vortex shedding frequency (nondimensional)",default=0.0))
self.attributes.append(Attribute("fmax","number","Maximum cross-flow vortex shedding frequency (nondimensional)",default=0.0))
self.attributes.append(Attribute("nmem","integer","Number of time steps used in calculation of standard deviation",default=500))
self.attributes.append(Attribute("cvil","number","Load coefficient for in-line excitation",default=0.0))
self.attributes.append(Attribute("alphil","number","Nondimensional parameter giving freedom to in-line excitation frequency",default=0.0))
self.attributes.append(Attribute("chh","number","Higher harmonic load coefficient (nondimensional)",default=0.0))
self.attributes.append(Attribute("fnullil","number","Natural in-line vortex shedding frequency (nondimensional)",default=0.0))
self.attributes.append(Attribute("fminil","number","Minimum in-line vortex shedding frequency (nondimensional)",default=0.0))
self.attributes.append(Attribute("fmaxil","number","Maximum in-line vortex shedding frequency (nondimensional)",default=0.0))
self.attributes.append(Attribute("scfkSpecification","boolean","Scaling of Froude-Krylov term in Morison’s equation in normal direction",default=True))
self.attributes.append(EnumAttribute("loadFormulation","sima/riflex/LoadFormulation",""))
self.attributes.append(Attribute("hydrodynamicDiameter","number","Hydrodynamic diameter",default=0.0))
self.attributes.append(Attribute("addedMassTanDir","number","Added mass in tangential direction",default=0.0))
self.attributes.append(Attribute("addedMassNormDir","number","Added mass in normal direction",default=0.0))
self.attributes.append(Attribute("dampingNormDir","number","Damping coefficients in normal direction",default=0.0))
self.attributes.append(Attribute("cdt","number","Quadratic drag coefficient in tangential direction.",default=0.0))
self.attributes.append(Attribute("cdn","number","Quadratic drag coefficient in normal direction.",default=0.0))
self.attributes.append(Attribute("cmt","number","Added mass per unit length in tangential direction.",default=0.0))
self.attributes.append(Attribute("cmn","number","Added mass per unit length in normal direction.",default=0.0))
self.attributes.append(Attribute("cdtl","number","Linear drag force coefficient in tangential direction.",default=0.0))
self.attributes.append(Attribute("cdnl","number","Linear drag force coefficient in normal direction.",default=0.0))
self.attributes.append(Attribute("cdx","number","Quadratic drag coefficient in tangential direction.",default=0.0))
self.attributes.append(Attribute("cdy","number","Quadratic drag coefficient in normal direction.",default=0.0))
self.attributes.append(Attribute("amx","number","Added mass per unit length in tangential direction.",default=0.0))
self.attributes.append(Attribute("amy","number","Added mass per unit length in normal direction.",default=0.0))
self.attributes.append(Attribute("cdlx","number","Linear drag force coefficient in tangential direction.",default=0.0))
self.attributes.append(Attribute("cdly","number","Linear drag force coefficient in normal direction.",default=0.0))
self.attributes.append(EnumAttribute("hydrodynamicInputCode","sima/riflex/HydrodynamicInputCode","Hydrodynamic input code"))
self.attributes.append(Attribute("scfk","number","Scaling factor for Froude-Krylov term in Morison’s equation in normal direction",default=1.0))
self.attributes.append(EnumAttribute("scfkt","sima/riflex/TangentialFroudeKrylovScaling","Scale for Froude-Krylov term in Morison’s equation in tangential direction"))
self.attributes.append(Attribute("massDampingSpecification","boolean","Mass proportional Rayleigh damping",default=False))
self.attributes.append(Attribute("stiffnessDampingSpecification","boolean","Stiffness proportional Rayleigh damping",default=False))
self.attributes.append(Attribute("axialDampingSpecification","boolean","Local axial damping model",default=False))
self.attributes.append(Attribute("temperature","number","Temperature at which the specification applies",default=0.0))
self.attributes.append(Attribute("alpha","number","Thermal expansion coefficient",default=0.0))
self.attributes.append(Attribute("beta","number","Pressure expansion coefficient",default=0.0))
self.attributes.append(BlueprintAttribute("massDamping","sima/riflex/CRSMassDamping","",True))
self.attributes.append(BlueprintAttribute("stiffnessDamping","sima/riflex/CRSStiffnessDamping","",True))
self.attributes.append(BlueprintAttribute("axialDamping","sima/riflex/CRSAxialDamping","",True))
self.attributes.append(Attribute("defaultExpansion","boolean","Use default thermal and pressure expansion settings",default=True))
self.attributes.append(Attribute("cdax","number","Quadratic aerodynamic drag force coefficient per unit length in tangential direction",default=0.0))
self.attributes.append(Attribute("cday","number","Quadratic aerodynamic drag force coefficient per unit length in normal direction",default=0.0))
self.attributes.append(Attribute("cdaz","number","Quadratic aerodynamic drag force coefficient per unit length in z direction",default=0.0))
self.attributes.append(EnumAttribute("aerodynamicInputCode","sima/riflex/AerodynamicInputCode","Aerodynamic input code"))
self.attributes.append(Attribute("aerodynamicDiameter","number","Aerodynamic diameter",default=0.0))
self.attributes.append(BlueprintAttribute("massVolume","sima/riflex/AxisymmetricCrossSectionMassVolume","",True))
self.attributes.append(EnumAttribute("axialStiffnessInput","sima/riflex/AxialStiffness","Axial stiffness input specification"))
self.attributes.append(EnumAttribute("bendingStiffnessInput","sima/riflex/BendingStiffness","Bending stiffness input specification"))
self.attributes.append(EnumAttribute("torsionStiffnessInput","sima/riflex/TorsionStiffness","Torsion stiffness input specification"))
self.attributes.append(Attribute("pressureDependency","integer","Pressure dependency parameter related to bending moment",default=0))
self.attributes.append(EnumAttribute("hysteresisOption","sima/riflex/Hysteresis","Hysteresis option in bending moment / curvature relation"))
self.attributes.append(Attribute("hardeningParameter","number","Hardening parameter",default=0.0))
self.attributes.append(Attribute("axialStiffness","number","Axial stiffness",default=0.0))
self.attributes.append(BlueprintAttribute("axialStiffnessCharacteristics","sima/riflex/AxialStiffnessItem","",True,Dimension("*")))
self.attributes.append(Attribute("bendingStiffness","number","Bending stiffness around y-axis",default=0.0))
self.attributes.append(Attribute("intFrictionMoment","number","Internal friction moment.",default=0.0))
self.attributes.append(Attribute("shearStiffness","number","Shear stiffness",default=0.0))
self.attributes.append(BlueprintAttribute("bendingStiffnessCharacteristics","sima/riflex/BendingStiffnessY_Item","",True,Dimension("*")))
self.attributes.append(Attribute("negativeTorsionStiffness","number","Torsion stiffness for negative twist.",default=0.0))
self.attributes.append(Attribute("positiveTorsionStiffness","number","Torsion stiffness for positive twist.",default=0.0))
self.attributes.append(BlueprintAttribute("torsionStiffnessCharacteristics","sima/riflex/TorsionStiffnessItem","",True,Dimension("*")))
self.attributes.append(Attribute("tensionCapacity","number","Tension capacity",default=0.0))
self.attributes.append(Attribute("maxCurvature","number","Maximum curvature",default=0.0))
self.attributes.append(EnumAttribute("barBeam","sima/riflex/BarBeam","Cross section type"))
self.attributes.append(Attribute("stiffnessFactor","number","Initial stiffness factor for internal friction moment",default=10.0))
self.attributes.append(Attribute("submerged","boolean","Use formulation for partly submerged cross-section",default=False))
self.attributes.append(Attribute("coupledBendingTorsion","boolean","Geometric stiffness coupling between bending and torsion",default=False))
self.attributes.append(EnumAttribute("hydrodynamicRadiationInputCode","sima/riflex/HydrodynamicRadiationInputCode","Code for input of simplified radiation force coefficients"))
self.attributes.append(Attribute("solidityRatio","number","Solidity ratio.",default=0.0))
self.attributes.append(Attribute("netWidthEnd1","number","Net width at segment end 1",default=0.0))
self.attributes.append(Attribute("netWidthEnd2","number","Net width at segment end 2",default=0.0))
self.attributes.append(Attribute("currentVelocityScaling","number","Ratio between reduced current speed and ambient current speed due to upstream net shadowing effects",default=1.0))
| 113.09
| 190
| 0.762402
|
de53edab605204f988fd6f933d289c9b8571a011
| 2,965
|
py
|
Python
|
conpaas-services/src/conpaas/core/misc.py
|
bopopescu/conpaas-1
|
cea3c02f499a729464697de7cf98c2041febc0ab
|
[
"BSD-3-Clause"
] | 1
|
2015-08-03T03:57:06.000Z
|
2015-08-03T03:57:06.000Z
|
conpaas-services/src/conpaas/core/misc.py
|
bopopescu/conpaas-1
|
cea3c02f499a729464697de7cf98c2041febc0ab
|
[
"BSD-3-Clause"
] | null | null | null |
conpaas-services/src/conpaas/core/misc.py
|
bopopescu/conpaas-1
|
cea3c02f499a729464697de7cf98c2041febc0ab
|
[
"BSD-3-Clause"
] | 2
|
2017-05-27T09:07:53.000Z
|
2020-07-26T03:15:55.000Z
|
import socket
import fcntl
import struct
import zipfile
import tarfile
import readline
from subprocess import Popen, PIPE
def file_get_contents(filepath):
f = open(filepath, 'r')
filecontent = f.read()
f.close()
return filecontent
def verify_port(port):
    '''Raise TypeError if port is not an integer.
    Raise ValueError if port is an invalid integer value.
'''
if type(port) != int: raise TypeError('port should be an integer')
if port < 1 or port > 65535: raise ValueError('port should be a valid port number')
def verify_ip_or_domain(ip):
    '''Raise TypeError if ip is not a string.
    Raise ValueError if ip is an invalid IP address in dot notation.
'''
if (type(ip) != str and type(ip) != unicode):
        raise TypeError('IP should be a string')
try:
socket.gethostbyname(ip)
except Exception as e:
raise ValueError('Invalid IP string "%s" -- %s' % (ip, e))
def verify_ip_port_list(l):
    '''Check that l is a list of [IP, PORT] pairs. Raise an appropriate error
    if invalid types or values are found.
'''
if type(l) != list:
raise TypeError('Expected a list of [IP, PORT]')
for pair in l:
if len(pair) != 2:
raise TypeError('List should contain IP,PORT values')
if 'ip' not in pair or 'port' not in pair:
raise TypeError('List should contain IP,PORT values')
verify_ip_or_domain(pair['ip'])
verify_port(pair['port'])
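# Illustrative only (not part of the original module): a minimal sketch of how the
# verify_* helpers above are intended to be used; the sample values are assumptions.
#
#   verify_port(8080)                                         # passes silently
#   verify_ip_or_domain('127.0.0.1')                          # passes silently
#   verify_ip_port_list([{'ip': '127.0.0.1', 'port': 8080}])  # passes silently
#   verify_port('8080')                                       # raises TypeError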
def archive_get_type(name):
if tarfile.is_tarfile(name):
return 'tar'
elif zipfile.is_zipfile(name):
return 'zip'
else: return None
def archive_open(name):
if tarfile.is_tarfile(name):
return tarfile.open(name)
elif zipfile.is_zipfile(name):
return zipfile.ZipFile(name)
else: return None
def archive_get_members(arch):
    if isinstance(arch, zipfile.ZipFile):
        members = arch.namelist()
    elif isinstance(arch, tarfile.TarFile):
        members = [ i.name for i in arch.getmembers() ]
    else:
        # Avoid an UnboundLocalError for unsupported archive types
        members = []
    return members
def archive_extract(arch, path):
if isinstance(arch, zipfile.ZipFile):
arch.extractall(path)
elif isinstance(arch, tarfile.TarFile):
arch.extractall(path=path)
def archive_close(arch):
if isinstance(arch, zipfile.ZipFile)\
or isinstance(arch, tarfile.TarFile):
arch.close()
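# Illustrative only (not part of the original module): the archive_* helpers above
# are meant to be used together; 'bundle.tar.gz' and '/tmp/extracted' are assumed names.
#
#   arch = archive_open('bundle.tar.gz')
#   if arch is not None:
#       members = archive_get_members(arch)
#       archive_extract(arch, '/tmp/extracted')
#       archive_close(arch)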
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
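# Illustrative only (not part of the original module): get_ip_address expects a
# Linux network interface name; 'eth0' below is an assumed interface.
#
#   addr = get_ip_address('eth0')  # e.g. '192.168.1.10'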
def run_cmd(cmd, directory='/'):
pipe = Popen(cmd, shell=True, cwd=directory, stdout=PIPE, stderr=PIPE)
out, error = pipe.communicate()
pipe.wait()
return out, error
def rlinput(prompt, prefill=''):
readline.set_startup_hook(lambda: readline.insert_text(prefill))
try:
return raw_input(prompt)
finally:
readline.set_startup_hook()
| 29.65
| 87
| 0.657336
|
9a16d0b7cc347a3bc0ca4c2837594a3525a6a0ec
| 19,102
|
py
|
Python
|
hdl_cores/auto-fir/auto-fir.py
|
hyf6661669/CEP
|
704caf0fd5589de40f91cb1d7e7b169f741260c5
|
[
"BSD-2-Clause"
] | 47
|
2017-10-25T17:35:29.000Z
|
2022-03-23T20:05:47.000Z
|
hdl_cores/auto-fir/auto-fir.py
|
hyf6661669/CEP
|
704caf0fd5589de40f91cb1d7e7b169f741260c5
|
[
"BSD-2-Clause"
] | 13
|
2019-10-02T14:23:05.000Z
|
2022-03-31T17:12:49.000Z
|
hdl_cores/auto-fir/auto-fir.py
|
hyf6661669/CEP
|
704caf0fd5589de40f91cb1d7e7b169f741260c5
|
[
"BSD-2-Clause"
] | 21
|
2018-04-10T17:14:30.000Z
|
2022-02-25T14:29:57.000Z
|
#!/usr/bin/python3
#//************************************************************************
#// Copyright 2021 Massachusetts Institute of Technology
#// SPDX short identifier: BSD-2-Clause
#//
#// File Name: auto-fir.py
#// Program: Common Evaluation Platform (CEP)
#// Description:    Perform a parameterized generation of a FIR/IIR filter
#// and subsequently generate and/or verify outputs
#// Notes:
#//
#//************************************************************************
# Import Required packages
import os
import sys
import random
import argparse
import subprocess
# -------------------------------------------------------------------------
# Function     : cmd_exists
# Description : A quick function to see if a command is available
# -------------------------------------------------------------------------
def cmd_exists(cmd):
return subprocess.call("type " + cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0
# -------------------------------------------------------------------------
# End Function : cmd_exists
# -------------------------------------------------------------------------
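# Illustrative only (not part of the original script): cmd_exists relies on the
# shell's `type` builtin, so it can be used to gate optional tooling, e.g. the
# `vsim` check performed further below.
#
#   if not cmd_exists("vsim"):
#       print("vsim not available; skipping simulation")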
# -------------------------------------------------------------------------
# Function : get_sources
# Description : Function to fetch and extract the tarball sources
# -------------------------------------------------------------------------
def get_sources(args):
    print ("   One or both of the expected executables (./firgen/iirGen.pl, ./synth/acm) are missing")
# Do we have the tarball sources or do we need to grab them?
if not (os.path.isfile('./firgen.tgz') or os.path.isfile('./synth-jan-14-2009.tar.gz')):
print(" Getting the tarball sources")
# Save the current working directory
cwd = os.path.dirname(os.path.realpath(sys.argv[0]))
# Capture the repo's root directory
repo_root = subprocess.run('git rev-parse --show-toplevel', check=True, shell=True, universal_newlines=True, stdout=subprocess.PIPE)
# Change to the repo's root directory
os.chdir(repo_root.stdout.rstrip())
        # Execute the get_external_dependencies script to fetch the FIR generator source
if args.verbose:
os.system("./get_external_dependencies.sh matching auto-fir yes")
else:
os.system("./get_external_dependencies.sh matching auto-fir yes >> /dev/null 2>&1")
        # Return to the generated filter directory
os.chdir(cwd)
print(" Extracting tarballs and building the acm executable")
# Perform the build (verbosely)
if args.verbose:
# Create the outputs directory
os.system("mkdir -p outputs")
# Extract the synth directory
os.system("rm -rf ./synth")
os.system("mkdir -p ./synth")
os.system("tar -C ./synth --strip-components=1 -zxvf synth-jan-14-2009.tar.gz")
# Patch a few files to allow compile
os.chdir("./synth")
os.system("sed -i '1i#include <cstdlib>' arith.h")
os.system("sed -i '1i#include <algorithm>' bhm.cpp")
        # Build the multiplier executable
os.system("make acm")
# Return back to the "original" directory
os.chdir("..")
# Extract the firgen
os.system("rm -rf ./firgen")
os.system("tar zxvf firgen.tgz")
# Perform the build (non-verbosely)
else:
# Create the outputs directory
os.system("mkdir -p outputs >> /dev/null 2>&1")
# Extract the synth directory
os.system("rm -rf ./synth >> /dev/null 2>&1")
os.system("mkdir -p ./synth >> /dev/null 2>&1")
os.system("tar -C ./synth --strip-components=1 -zxvf synth-jan-14-2009.tar.gz >> /dev/null 2>&1")
# Patch a few files to allow compile
os.chdir("./synth")
os.system("sed -i '1i#include <cstdlib>' arith.h >> /dev/null 2>&1")
os.system("sed -i '1i#include <algorithm>' bhm.cpp >> /dev/null 2>&1")
        # Build the multiplier executable
os.system("make acm >> /dev/null 2>&1")
# Return back to the "original" directory
os.chdir("..")
# Extract the firgen
os.system("rm -rf ./firgen >> /dev/null 2>&1")
os.system("tar zxvf firgen.tgz >> /dev/null 2>&1")
# -------------------------------------------------------------------------
# End Function : get_sources
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# Function : clean
# Description : Perform some cleanup
# -------------------------------------------------------------------------
def clean(args):
if args.verbose:
print("")
print(" Removing the build/output directories")
print("")
if args.allclean:
os.system("rm -f firgen.tgz")
os.system("rm -f synth-jan-14-2009.tar.gz")
os.system("rm -rf firgen")
os.system("rm -rf synth")
os.system("rm -rf outputs")
os.system("rm -rf work")
os.system("rm -rf __pycache__")
os.system("rm -f transcript")
os.system("rm -f *.firlog")
# -------------------------------------------------------------------------
# End Function     : clean
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# Function : setup_parser
# Description : Setup the parser and associated arguments
# -------------------------------------------------------------------------
def setup_parser():
# Create the argument parser
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
group1 = parser.add_argument_group("General Arguments")
group2 = parser.add_argument_group("Build Only Arguments")
group3 = parser.add_argument_group("Simulation Only Arguments")
group4 = parser.add_argument_group("Build and Simulation Arguments")
# General arguments
group1.add_argument("--clean", help="remove the build/output directories",
action="store_true")
group1.add_argument("--allclean", help="remove the tarballs and the build/output directories",
action="store_true")
group1.add_argument("-b", "--build", help="Generate the filter verilog",
action="store_true")
group1.add_argument("-sg", "--simgen", help="Run the filter simulation in generate test vector mode",
action="store_true")
group1.add_argument("-sv", "--simver", help="Run the filter simulation in verification mode",
action="store_true")
group1.add_argument("-v", "--verbose", help="increase output verbosity",
action="store_true")
# Build arguments
group2.add_argument("-a", "--amnesia", help="Random verilog identifiers",
action="store_true")
group2.add_argument("-seed", "--random_seed", type=int, default=-1,
help="Specify the PRNG seed (if not specified, system time will be used")
group2.add_argument("-aminn", "--a_min_number", type=int, default=1,
help="Specify the minimum number of A coefficients")
group2.add_argument("-amaxn", "--a_max_number", type=int, default=20,
help="Specify the maximum number of A coefficients")
group2.add_argument("-bminn", "--b_min_number", type=int, default=1,
help="Specify the minimum number of B coefficients")
group2.add_argument("-bmaxn", "--b_max_number", type=int, default=20,
help="Specify the maximum number of B coefficients")
group2.add_argument("-aminv", "--a_min_value", type=int, default=1,
help="Specify the minimum possible A coefficient")
group2.add_argument("-amaxv", "--a_max_value", type=int, default=2**25-1,
help="Specify the maximum possible A coefficient")
group2.add_argument("-bminv", "--b_min_value", type=int, default=1,
help="Specify the minimum possible B coefficient")
group2.add_argument("-bmaxv", "--b_max_value", type=int, default=2**25-1,
help="Specify the maximum possible B coefficient")
# Simulation arguments
group3.add_argument("-sam", "--samples", type=int, default=100,
help="Specify the number of samples to capture")
group3.add_argument("-cf", "--capture_file", type=str, default="filter_tb_capture.firlog",
help="Specify the sample capture file")
group3.add_argument("-log", "--log_file", type=str, default="filter.firlog",
help="Simulation log file")
# Build and Simulation Arguments
group4.add_argument("-dut", "--dut_name", type=str, default="filter",
help="Specify the DUT module name. Expected verilog filename is <DUT>.v\n ")
# Return the resulting data structure
return parser.parse_args()
# -------------------------------------------------------------------------
# End Function : setup_parser
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# Function : run_build
# Description  : Generate the filter based on the specified arguments
# -------------------------------------------------------------------------
def run_build(args):
    # Verify the presence of the iirGen.pl and acm executables
    if not (os.path.isfile('./firgen/iirGen.pl') and os.path.isfile('./synth/acm')):
get_sources(args)
# Print the parameters
if args.verbose:
print("")
print("Build Options -")
print(" random_seed = " + str(args.random_seed))
print(" a_min_number = " + str(args.a_min_number))
print(" a_max_number = " + str(args.a_max_number))
print(" a_min_value = " + str(args.a_min_value))
print(" a_max_value = " + str(args.a_max_value))
print(" b_min_number = " + str(args.b_min_number))
print(" b_max_number = " + str(args.b_max_number))
print(" b_min_value = " + str(args.b_min_value))
print(" b_max_value = " + str(args.b_max_value))
print("")
# Do some basic error checking on the parameters (to avoid breaking python OR perl)
if (args.a_min_number > args.a_max_number or args.a_min_number > 20 or args.a_max_number > 20):
sys.exit("ERROR: Both a_max_number and a_min_number must be <=20 AND min <= max")
if (args.b_min_number > args.b_max_number or args.b_min_number > 20 or args.b_max_number > 20):
sys.exit("ERROR: Both b_max_number and b_min_number must be <=20 AND min <= max")
if (args.a_min_value > args.a_max_value or args.a_min_value > 2**25-1 or args.a_max_value > 2**25-1):
sys.exit("ERROR: Both a_max_value and a_min_value must be <=2**25 AND min <= max")
if (args.b_min_value > args.b_max_value or args.b_min_value > 2**25-1 or args.b_max_value > 2**25-1):
sys.exit("ERROR: Both a_max_value and a_min_value must be <=2**25 AND min <= max")
# Initialize the PRNG. If the random_seed is non-negative, then
# it will be used. Otherwise, the system time will be used
if (args.random_seed >= 0):
random.seed(args.random_seed)
else:
random.seed()
# How many A and B constants should we generate?
a_number = random.randint(args.a_min_number, args.a_max_number)
b_number = random.randint(args.b_min_number, args.b_max_number)
# Initialize the A and B constant arrays
a_constants = "1 "
b_constants = ""
# Generate the A constants
for x in range(0, a_number):
a_constants = a_constants + str(random.randint(args.a_min_value, args.a_max_value)) + " "
# Generate the B constants
for x in range(0, b_number):
b_constants = b_constants + str(random.randint(args.b_min_value, args.b_max_value)) + " "
# Constructs the command string for calling the IIR Generator
dut_filename = "{}.v".format(args.dut_name)
command_string = "./iirGen.pl " + \
"-A " + a_constants + \
"-B " + b_constants + \
"-moduleName {} ".format(args.dut_name) + \
"-fractionalBits 0 " + \
"-bitWidth 64 " + \
"-inData inData " + \
"-inReg " + \
"-outReg " + \
"-outData outData " + \
"-clk clk " + \
"-reset reset " + \
"-reset_edge negedge " + \
"-filterForm 1 " + \
"-outFile ../outputs/" + \
dut_filename
# Change to the firgen directory
os.chdir("./firgen")
# Call the IIR generator using the constructed command string
if args.verbose:
print(" Calling the IIR Generator using the following command -")
print(command_string)
os.system("time " + command_string)
else:
print(" Calling the IIR Generator")
os.system(command_string)
# Return to the original directory
os.chdir("..")
    # Run amnesia on the resulting file to randomize signal names
if args.amnesia:
amnesia_command = "./amnesia.py ./outputs/" + dut_filename
if args.verbose:
print(" Running amnesia on the filter using the following command -")
print(amnesia_command)
else:
print(" Running amnesia on the filter")
with open("./outputs/tmp.v","w") as outfile:
subprocess.run(amnesia_command, check=True, shell=True, stdout=outfile)
os.system("cp ./outputs/tmp.v ./outputs/" + dut_filename)
# Remove intermediate files
os.system("rm -f ./outputs/tmp.v")
os.system("rm -f parser.out")
os.system("rm -f parsetab.py")
# -------------------------------------------------------------------------
# End Function : run_build
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# Function : run_questa_simulation
# Description : Will run the simulation in either CAPTURE or VERIFY
# mode
# -------------------------------------------------------------------------
def run_questa_simulation(args):
# Run simulation in verbose mode
if args.verbose:
# Using the IIR_filter_tb.v, generate a series of input/output pairs on the newly generated core
# Remove the previous working directory
os.system("rm -rf work")
# Compile the newly generated core
os.system("vlog ./outputs/{}.v".format(args.dut_name))
# Compile the testbench in either CAPTURE or VERIFY mode
if args.simgen:
system_call = 'vlog +define+CAPTURE +define+NUM_SAMPLES={} +define+DUT_NAME={} '.format(args.samples, args.dut_name) + \
'+define+CAPTURE_FILE=\\\"{}\\\" filter_tb.v'.format(args.capture_file)
print("COMMAND: " + system_call)
os.system(system_call)
else:
system_call = 'vlog +define+VERIFY +define+NUM_SAMPLES={} +define+DUT_NAME={} '.format(args.samples, args.dut_name) + \
'+define+CAPTURE_FILE=\\\"{}\\\" filter_tb.v'.format(args.capture_file)
print("COMMAND: " + system_call)
os.system(system_call)
# Define optimization options
os.system("vopt +acc work.filter_tb -o dbugver")
# Run the simulation
os.system("vsim dbugver -classdebug +notimingchecks -c +trace_enable -do \"run -all;quit\" -l {};".format(args.log_file))
# Run simulation in NOT verbose mode
else:
# Using the IIR_filter_tb.v, generate a series of input/output pairs on the newly generated core
# Remove the previous working directory
os.system("rm -rf work >> /dev/null 2>&1")
# Compile the newly generated core
os.system("vlog ./outputs/{}.v >> /dev/null 2>&1".format(args.dut_name))
# Compile the testbench in either CAPTURE or VERIFY mode
if args.simgen:
system_call = 'vlog +define+CAPTURE +define+NUM_SAMPLES={} +define+DUT_NAME={} '.format(args.samples, args.dut_name) + \
'+define+CAPTURE_FILE=\\\"{}\\\" filter_tb.v >> /dev/null 2>&1'.format(args.capture_file)
os.system(system_call)
else:
system_call = 'vlog +define+VERIFY +define+NUM_SAMPLES={} +define+DUT_NAME={} '.format(args.samples, args.dut_name) + \
'+define+CAPTURE_FILE=\\\"{}\\\" filter_tb.v >> /dev/null 2>&1'.format(args.capture_file)
os.system(system_call)
# Define optimization options
os.system("vopt +acc work.filter_tb -o dbugver >> /dev/null 2>&1")
# Run the simulation
os.system("vsim dbugver -classdebug +notimingchecks -c +trace_enable -do \"run -all;quit\" -l {};".format(args.log_file))
# -------------------------------------------------------------------------
# End Function    : run_questa_simulation
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# Function : main
# Description : Main function
# -------------------------------------------------------------------------
# Setup the argument parser
args = setup_parser()
if args.verbose:
print ("")
print ("------------------------------------------------------------------------------------------")
print ("--- CEP Filter Generator ---")
print ("------------------------------------------------------------------------------------------")
print ("")
# Do some cleanup
if args.clean or args.allclean:
clean(args)
# Run the generator, if specified so to do
elif args.build:
run_build(args)
elif args.simgen or args.simver:
# Verify vsim is available
if (not cmd_exists("vsim")):
sys.exit("ERROR: vsim not available\n")
# Call the questa-based simulation
run_questa_simulation(args)
# end of if args.build
else:
print ("")
print ("Usage info - ./" + os.path.basename(__file__) + " -h")
print ("")
if args.verbose:
print ("")
print ("------------------------------------------------------------------------------------------")
print ("--- CEP Filter Generator Complete ---")
print ("------------------------------------------------------------------------------------------")
print ("")
# -------------------------------------------------------------------------
# End Function : main
# -------------------------------------------------------------------------
| 42.733781
| 140
| 0.525128
|
cc51e341f46cb74d07dff2c0d58a64966cd9661b
| 3,851
|
py
|
Python
|
tensorflow/python/autograph/utils/ag_logging.py
|
jeffdaily/tensorflow-upstream
|
2ac94cf58dafd29ddeb086a913a130711ae6712e
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/autograph/utils/ag_logging.py
|
jeffdaily/tensorflow-upstream
|
2ac94cf58dafd29ddeb086a913a130711ae6712e
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/autograph/utils/ag_logging.py
|
jeffdaily/tensorflow-upstream
|
2ac94cf58dafd29ddeb086a913a130711ae6712e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logging and debugging utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
# TODO(mdan): Use a custom logger class.
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
VERBOSITY_VAR_NAME = 'AUTOGRAPH_VERBOSITY'
DEFAULT_VERBOSITY = 0
verbosity_level = None # vlog-like. Takes precedence over the env variable.
echo_log_to_stdout = False
# In interactive Python, logging echo is enabled by default.
if hasattr(sys, 'ps1') or hasattr(sys, 'ps2'):
echo_log_to_stdout = True
@tf_export('autograph.set_verbosity')
def set_verbosity(level, alsologtostdout=False):
"""Sets the AutoGraph verbosity level.
_Debug logging in AutoGraph_
More verbose logging is useful to enable when filing bug reports or doing
more in-depth debugging.
There are two controls that control the logging verbosity:
* The `set_verbosity` function
* The `AUTOGRAPH_VERBOSITY` environment variable
`set_verbosity` takes precedence over the environment variable.
For example:
```python
import os
import tensorflow as tf
  os.environ['AUTOGRAPH_VERBOSITY'] = '5'
# Verbosity is now 5
tf.autograph.set_verbosity(0)
# Verbosity is now 0
  os.environ['AUTOGRAPH_VERBOSITY'] = '1'
# No effect, because set_verbosity was already called.
```
Logs entries are output to [absl](https://abseil.io)'s default output,
with `INFO` level.
Logs can be mirrored to stdout by using the `alsologtostdout` argument.
Mirroring is enabled by default when Python runs in interactive mode.
Args:
level: int, the verbosity level; larger values specify increased verbosity;
0 means no logging. When reporting bugs, it is recommended to set this
      value to a larger number, like 10.
alsologtostdout: bool, whether to also output log messages to `sys.stdout`.
"""
global verbosity_level
global echo_log_to_stdout
verbosity_level = level
echo_log_to_stdout = alsologtostdout
@tf_export('autograph.trace')
def trace(*args):
"""Traces argument information at compilation time.
`trace` is useful when debugging, and it always executes during the tracing
phase, that is, when the TF graph is constructed.
_Example usage_
```python
import tensorflow as tf
for i in tf.range(10):
tf.autograph.trace(i)
# Output: <Tensor ...>
```
Args:
*args: Arguments to print to `sys.stdout`.
"""
print(*args)
def get_verbosity():
global verbosity_level
if verbosity_level is not None:
return verbosity_level
  return int(os.getenv(VERBOSITY_VAR_NAME, DEFAULT_VERBOSITY))
def has_verbosity(level):
return get_verbosity() >= level
def error(level, msg, *args, **kwargs):
if has_verbosity(level):
logging.error(msg, *args, **kwargs)
if echo_log_to_stdout:
print(msg % args)
def log(level, msg, *args, **kwargs):
if has_verbosity(level):
logging.info(msg, *args, **kwargs)
if echo_log_to_stdout:
print(msg % args)
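# Illustrative only (not part of the original module): a minimal sketch of how the
# verbosity gate above behaves; the verbosity value 2 is an arbitrary assumption.
#
#   set_verbosity(2)
#   log(1, 'converted %s', 'my_func')           # emitted, since 1 <= 2
#   log(5, 'detailed trace for %s', 'my_func')  # suppressed, since 5 > 2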
def warn_first_n(msg, *args, **kwargs):
logging.log_first_n(logging.WARNING, msg, *args, **kwargs)
| 28.316176
| 80
| 0.726824
|
f7592898a6060fa4986bb4d99efadd7f7bda97c3
| 3,061
|
py
|
Python
|
tests/test_utils_get_static_file.py
|
loonghao/dayu_widgets
|
42758872993197880f68d141ee1ce314f9b2cfea
|
[
"MIT"
] | null | null | null |
tests/test_utils_get_static_file.py
|
loonghao/dayu_widgets
|
42758872993197880f68d141ee1ce314f9b2cfea
|
[
"MIT"
] | null | null | null |
tests/test_utils_get_static_file.py
|
loonghao/dayu_widgets
|
42758872993197880f68d141ee1ce314f9b2cfea
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test get_static_file function.
"""
# Import future modules
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Import built-in modules
import os
# Import third-party modules
from dayu_widgets import CUSTOM_STATIC_FOLDERS
from dayu_widgets import DEFAULT_STATIC_FOLDER
from dayu_widgets import utils
import pytest
@pytest.fixture(scope="module", name="custom_folder")
def setup_custom_folder(tmpdir_factory):
"""Create a folder to represent user's custom static folder"""
    # the user has their own custom static folder
    # and puts icons or images in it
tmp_folder = tmpdir_factory.mktemp("my_static")
# create a file in base dir
tmp_folder.join("add_line.svg").ensure()
# create a sub folder and a file in the sub folder
tmp_folder.join("sub_folder", "sub_file.png").ensure()
return tmp_folder
@pytest.mark.parametrize(
"input_path, output_path",
(
("add_line.svg", os.path.join(DEFAULT_STATIC_FOLDER, "add_line.svg")),
("check.svg", os.path.join(DEFAULT_STATIC_FOLDER, "check.svg")),
("", None),
("a_not_exists_file", None),
(
os.path.join(os.path.dirname(__file__), "for_test.txt"),
os.path.join(os.path.dirname(__file__), "for_test.txt"),
), # user give a full path file, return
("main.qss", os.path.join(DEFAULT_STATIC_FOLDER, "main.qss")),
),
)
def test_get_static_file(input_path, output_path):
"""Only default static file. Test different situation input."""
assert utils.get_static_file(input_path) == output_path
def test_custom_static_folder(custom_folder):
"""Test when user append a custom static folder."""
CUSTOM_STATIC_FOLDERS.append(str(custom_folder))
for input_file, result in (
("add_line.svg", os.path.join(DEFAULT_STATIC_FOLDER, "add_line.svg")),
("check.svg", os.path.join(DEFAULT_STATIC_FOLDER, "check.svg")),
("", None),
("a_not_exists_file", None),
(
os.path.join(os.path.dirname(__file__), "for_test.txt"),
# user give a full path file, return
os.path.join(os.path.dirname(__file__), "for_test.txt"),
),
(
"sub_folder/sub_file.png",
os.path.join(str(custom_folder), "sub_folder/sub_file.png"),
),
):
assert utils.get_static_file(input_file) == result
@pytest.mark.parametrize(
"input_file, error_type",
(
(3, int),
(set(), set),
({}, dict),
(["g"], list),
((2,), tuple),
(object(), object),
),
)
def test_with_wrong_type(input_file, error_type):
"""Make sure when user give a wrong type arg, raise TypeError"""
with pytest.raises(TypeError) as exc_info:
utils.get_static_file(input_file)
exception_msg = exc_info.value.args[0]
assert (
exception_msg == "Input argument 'path' should be basestring type, "
"but get {}".format(error_type)
)
| 31.885417
| 78
| 0.653055
|
8e9cc9de679f043e8843b980c44c52f5361c558e
| 1,769
|
py
|
Python
|
package/spack-xapian-core/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | 1
|
2018-07-17T07:45:09.000Z
|
2018-07-17T07:45:09.000Z
|
package/spack-xapian-core/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | null | null | null |
package/spack-xapian-core/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | null | null | null |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class XapianCore(AutotoolsPackage):
"""Xapian is a highly adaptable toolkit which allows developers to easily
add advanced indexing and search facilities to their own applications.
It supports the Probabilistic Information Retrieval model and also
supports a rich set of boolean query operators."""
homepage = "https://xapian.org"
url = "http://oligarchy.co.uk/xapian/1.4.3/xapian-core-1.4.3.tar.xz"
version('1.4.3', '143f72693219f7fc5913815ed858f295')
depends_on('zlib')
| 44.225
| 78
| 0.688525
|
fccd228fc6b01f3a8f112f864173be5a7a5fbb39
| 731
|
py
|
Python
|
posts/migrations/0005_auto_20200601_0209.py
|
ChegeDaniella/Instagram
|
aaa8a965f10e06397d0faacb3791aef7c1e454c0
|
[
"MIT"
] | null | null | null |
posts/migrations/0005_auto_20200601_0209.py
|
ChegeDaniella/Instagram
|
aaa8a965f10e06397d0faacb3791aef7c1e454c0
|
[
"MIT"
] | 4
|
2020-06-06T01:57:02.000Z
|
2021-09-08T02:06:24.000Z
|
posts/migrations/0005_auto_20200601_0209.py
|
ChegeDaniella/Instagram
|
aaa8a965f10e06397d0faacb3791aef7c1e454c0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-05-31 23:09
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('posts', '0004_auto_20200601_0147'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='image_likes',
),
migrations.AddField(
model_name='post',
name='image_likes',
field=models.ManyToManyField(blank=True, default=None, related_name='image_likes', to=settings.AUTH_USER_MODEL),
),
]
| 27.074074
| 124
| 0.644323
|
cd706b6af32770fdcdfb2ebb5e2312e3a2020093
| 1,403
|
py
|
Python
|
setup.py
|
jkapila/py-git-package
|
7f7faf88802f593e427ca42569591100f61212c4
|
[
"MIT"
] | null | null | null |
setup.py
|
jkapila/py-git-package
|
7f7faf88802f593e427ca42569591100f61212c4
|
[
"MIT"
] | null | null | null |
setup.py
|
jkapila/py-git-package
|
7f7faf88802f593e427ca42569591100f61212c4
|
[
"MIT"
] | null | null | null |
from setuptools import find_packages, setup
# Note:
# The Hitchhiker's Guide to Python provides an excellent, standard method for creating Python packages:
# http://docs.python-guide.org/en/latest/writing/structure/
#
# To deploy on PYPI follow the instructions at the bottom of:
# https://packaging.python.org/tutorials/distributing-packages/#uploading-your-project-to-pypi
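# Illustrative only (not part of the original file): one common, minimal way to build
# and upload the distribution described below, assuming the `build` and `twine`
# packages are installed:
#
#   python -m build
#   twine upload dist/*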
with open("README.md") as f:
readme_text = f.read()
with open("LICENSE") as f:
license_text = f.read()
setup(
name="py-git-package",
version="0.0.1",
py_modules=[],
install_requires=[],
url="https://www.github.com/username/py-git-package",
license="MIT",
author="user you",
description="An awesome package for all",
long_description=readme_text,
long_description_content_type="text/markdown",
packages=find_packages(exclude=("tests", "docsrc")),
include_package_data=True,
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries :: Python Modules",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
keywords=["package", "pre-commit", "furo", "package development"],
)
| 33.404762
| 105
| 0.662865
|
3b0af25665229bf43688fae27105b83ba6f89c7e
| 808
|
py
|
Python
|
ig/plot_result.py
|
wangjunxiao/DLG
|
befd9c31d214098fdccf97794428a5550bbf8b1a
|
[
"MIT"
] | null | null | null |
ig/plot_result.py
|
wangjunxiao/DLG
|
befd9c31d214098fdccf97794428a5550bbf8b1a
|
[
"MIT"
] | null | null | null |
ig/plot_result.py
|
wangjunxiao/DLG
|
befd9c31d214098fdccf97794428a5550bbf8b1a
|
[
"MIT"
] | null | null | null |
import os
import math
import matplotlib.pyplot as plt
from PIL import Image
num_images = 20
config_hash = '803521804a9a4c844495134db73291e4'
if __name__ == "__main__":
# plot the resulting image
gt_name = 'ground_truth'
rec_name = 'rec'
col_num = 10
row_num = math.ceil(num_images/col_num)
for image_plot in range(num_images):
gt_file = Image.open(os.path.join(f'results/{config_hash}/', f'{image_plot}_{gt_name}.png'))
rec_file = Image.open(os.path.join(f'results/{config_hash}/', f'{image_plot}_{rec_name}.png'))
plt.subplot(2*row_num,col_num,image_plot+1)
plt.imshow(gt_file), plt.axis('off')
plt.subplot(2*row_num,col_num,row_num*col_num+image_plot+1)
plt.imshow(rec_file), plt.axis('off')
plt.savefig('results/plot_result.pdf')
| 33.666667
| 102
| 0.69802
|
14626dfd6148ddfbab32b912c452a6d952ebe064
| 13,317
|
py
|
Python
|
plenum/test/view_change/helper.py
|
steptan/indy-plenum
|
488bf63c82753a74a92ac6952da784825ffd4a3d
|
[
"Apache-2.0"
] | null | null | null |
plenum/test/view_change/helper.py
|
steptan/indy-plenum
|
488bf63c82753a74a92ac6952da784825ffd4a3d
|
[
"Apache-2.0"
] | null | null | null |
plenum/test/view_change/helper.py
|
steptan/indy-plenum
|
488bf63c82753a74a92ac6952da784825ffd4a3d
|
[
"Apache-2.0"
] | null | null | null |
import types
from stp_core.types import HA
from plenum.test.delayers import delayNonPrimaries, delay_3pc_messages, reset_delays_and_process_delayeds, \
icDelay
from plenum.test.helper import checkViewNoForNodes, sendRandomRequests, \
sendReqsToNodesAndVerifySuffReplies, send_reqs_to_nodes_and_verify_all_replies
from plenum.test.pool_transactions.helper import \
disconnect_node_and_ensure_disconnected
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.test_node import get_master_primary_node, ensureElectionsDone, \
TestNode, checkNodesConnected
from stp_core.common.log import getlogger
from stp_core.loop.eventually import eventually
from plenum.test import waits
from plenum.common.config_helper import PNodeConfigHelper
logger = getlogger()
def start_stopped_node(stopped_node, looper, tconf,
tdir, allPluginsPath,
delay_instance_change_msgs=True):
nodeHa, nodeCHa = HA(*
stopped_node.nodestack.ha), HA(*
stopped_node.clientstack.ha)
config_helper = PNodeConfigHelper(stopped_node.name, tconf, chroot=tdir)
restarted_node = TestNode(stopped_node.name,
config_helper=config_helper,
config=tconf,
ha=nodeHa, cliha=nodeCHa,
pluginPaths=allPluginsPath)
looper.add(restarted_node)
    # Even after reconnection, INSTANCE_CHANGE messages are received, so
    # delay them enough to simulate a real disconnection. This needs to be
    # fixed once simulating a disconnection also drains the transport queues.
    # TODO: is this still relevant?
if delay_instance_change_msgs:
restarted_node.nodeIbStasher.delay(icDelay(200))
return restarted_node
def provoke_and_check_view_change(nodes, newViewNo, wallet, client):
if {n.viewNo for n in nodes} == {newViewNo}:
return True
# If throughput of every node has gone down then check that
# view has changed
tr = [n.monitor.isMasterThroughputTooLow() for n in nodes]
if all(tr):
logger.info('Throughput ratio gone down, its {}'.format(tr))
checkViewNoForNodes(nodes, newViewNo)
else:
logger.info('Master instance has not degraded yet, '
'sending more requests')
sendRandomRequests(wallet, client, 10)
assert False
def provoke_and_wait_for_view_change(looper,
nodeSet,
expectedViewNo,
wallet,
client,
customTimeout=None):
timeout = customTimeout or waits.expectedPoolViewChangeStartedTimeout(
len(nodeSet))
# timeout *= 30
return looper.run(eventually(provoke_and_check_view_change,
nodeSet,
expectedViewNo,
wallet,
client,
timeout=timeout))
def simulate_slow_master(looper, nodeSet, wallet,
client, delay=10, num_reqs=4):
m_primary_node = get_master_primary_node(list(nodeSet.nodes.values()))
    # Delay processing of PRE-PREPARE from all non-primary replicas of the master
    # so the master's performance degrades and a view change is triggered
delayNonPrimaries(nodeSet, 0, delay)
sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, num_reqs)
return m_primary_node
def ensure_view_change(looper, nodes, exclude_from_check=None,
custom_timeout=None):
"""
This method patches the master performance check to return False and thus
ensures that all given nodes do a view change
"""
old_view_no = checkViewNoForNodes(nodes)
old_meths = {node.name: {} for node in nodes}
view_changes = {}
for node in nodes:
old_meths[node.name]['isMasterDegraded'] = node.monitor.isMasterDegraded
old_meths[node.name]['_update_new_ordered_reqs_count'] = node._update_new_ordered_reqs_count
view_changes[node.name] = node.monitor.totalViewChanges
def slow_master(self):
# Only allow one view change
rv = self.totalViewChanges == view_changes[self.name]
if rv:
logger.info('{} making master look slow'.format(self))
return rv
node.monitor.isMasterDegraded = types.MethodType(
slow_master, node.monitor)
node._update_new_ordered_reqs_count = types.MethodType(
lambda self: True, node)
perf_check_freq = next(iter(nodes)).config.PerfCheckFreq
timeout = custom_timeout or waits.expectedPoolViewChangeStartedTimeout(
len(nodes)) + perf_check_freq
nodes_to_check = nodes if exclude_from_check is None else [
n for n in nodes if n not in exclude_from_check]
logger.debug('Checking view no for nodes {}'.format(nodes_to_check))
looper.run(eventually(checkViewNoForNodes, nodes_to_check, old_view_no + 1,
retryWait=1, timeout=timeout))
logger.debug('Patching back perf check for all nodes')
for node in nodes:
node.monitor.isMasterDegraded = old_meths[node.name]['isMasterDegraded']
node._update_new_ordered_reqs_count = old_meths[node.name]['_update_new_ordered_reqs_count']
return old_view_no + 1
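# Illustrative only (not part of the original helpers): a minimal sketch of how a test
# might drive ensure_view_change; `looper` and `txnPoolNodeSet` are the fixtures these
# helpers are usually called with, assumed here.
#
#   def test_view_change_happens(looper, txnPoolNodeSet):
#       new_view_no = ensure_view_change(looper, txnPoolNodeSet)
#       ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
#       assert checkViewNoForNodes(txnPoolNodeSet) == new_view_no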
def ensure_several_view_change(looper, nodes, vc_count=1,
exclude_from_check=None, custom_timeout=None):
"""
    This method patches the master performance check to return False and thus
    ensures that all given nodes do a view change.
    It can also perform several view changes in a row.
    If you tried to do several view changes by calling ensure_view_change,
    monkeypatching the isMasterDegraded method would behave unexpectedly.
    Therefore, isMasterDegraded is only restored after the requested number
    of view changes has completed.
"""
old_meths = {}
view_changes = {}
expected_view_no = None
for node in nodes:
old_meths[node.name] = node.monitor.isMasterDegraded
for __ in range(vc_count):
old_view_no = checkViewNoForNodes(nodes)
expected_view_no = old_view_no + 1
for node in nodes:
view_changes[node.name] = node.monitor.totalViewChanges
def slow_master(self):
# Only allow one view change
rv = self.totalViewChanges == view_changes[self.name]
if rv:
logger.info('{} making master look slow'.format(self))
return rv
node.monitor.isMasterDegraded = types.MethodType(slow_master, node.monitor)
perf_check_freq = next(iter(nodes)).config.PerfCheckFreq
timeout = custom_timeout or waits.expectedPoolViewChangeStartedTimeout(len(nodes)) + perf_check_freq
nodes_to_check = nodes if exclude_from_check is None else [n for n in nodes if n not in exclude_from_check]
logger.debug('Checking view no for nodes {}'.format(nodes_to_check))
looper.run(eventually(checkViewNoForNodes, nodes_to_check, expected_view_no, retryWait=1, timeout=timeout))
ensureElectionsDone(looper=looper, nodes=nodes, customTimeout=timeout)
ensure_all_nodes_have_same_data(looper, nodes, custom_timeout=timeout, exclude_from_check=exclude_from_check)
return expected_view_no
def ensure_view_change_by_primary_restart(
looper, nodes,
tconf, tdirWithPoolTxns, allPluginsPath, customTimeout=None):
"""
This method stops current primary for a while to force a view change
Returns new set of nodes
"""
old_view_no = checkViewNoForNodes(nodes)
primaryNode = [node for node in nodes if node.has_master_primary][0]
logger.debug("Disconnect current primary node {} from others, "
"current viewNo {}".format(primaryNode, old_view_no))
disconnect_node_and_ensure_disconnected(looper, nodes,
primaryNode, stopNode=True)
looper.removeProdable(primaryNode)
remainingNodes = list(set(nodes) - {primaryNode})
logger.debug("Waiting for viewNo {} for nodes {}"
"".format(old_view_no + 1, remainingNodes))
timeout = customTimeout or waits.expectedPoolViewChangeStartedTimeout(
len(remainingNodes)) + nodes[0].config.ToleratePrimaryDisconnection
looper.run(eventually(checkViewNoForNodes, remainingNodes, old_view_no + 1,
retryWait=1, timeout=timeout))
logger.debug("Starting stopped ex-primary {}".format(primaryNode))
restartedNode = start_stopped_node(primaryNode, looper, tconf,
tdirWithPoolTxns, allPluginsPath,
delay_instance_change_msgs=False)
nodes = remainingNodes + [restartedNode]
logger.debug("Ensure all nodes are connected")
looper.run(checkNodesConnected(nodes))
logger.debug("Ensure all nodes have the same data")
ensure_all_nodes_have_same_data(looper, nodes=nodes)
return nodes
def check_each_node_reaches_same_end_for_view(nodes, view_no):
# Check if each node agreed on the same ledger summary and last ordered
# seq no for same view
args = {}
vals = {}
for node in nodes:
params = [e.params for e in node.replicas[0].spylog.getAll(
node.replicas[0].primary_changed.__name__)
if e.params['view_no'] == view_no]
assert params
args[node.name] = (params[0]['last_ordered_pp_seq_no'],
params[0]['ledger_summary'])
vals[node.name] = node.replicas[0].view_ends_at[view_no - 1]
arg = list(args.values())[0]
for a in args.values():
assert a == arg
val = list(args.values())[0]
for v in vals.values():
assert v == val
def do_vc(looper, nodes, client, wallet, old_view_no=None):
sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 5)
new_view_no = ensure_view_change(looper, nodes)
if old_view_no:
assert new_view_no - old_view_no >= 1
return new_view_no
def disconnect_master_primary(nodes):
pr_node = get_master_primary_node(nodes)
for node in nodes:
if node != pr_node:
node.nodestack.getRemote(pr_node.nodestack.name).disconnect()
return pr_node
def check_replica_queue_empty(node):
replica = node.replicas[0]
assert len(replica.prePrepares) == 0
assert len(replica.prePreparesPendingFinReqs) == 0
assert len(replica.prepares) == 0
assert len(replica.sentPrePrepares) == 0
assert len(replica.batches) == 0
assert len(replica.commits) == 0
assert len(replica.commitsWaitingForPrepare) == 0
assert len(replica.ordered) == 0
def check_all_replica_queue_empty(nodes):
for node in nodes:
check_replica_queue_empty(node)
def ensure_view_change_complete(looper, nodes, exclude_from_check=None,
customTimeout=None):
ensure_view_change(looper, nodes)
ensureElectionsDone(looper=looper, nodes=nodes,
customTimeout=customTimeout)
ensure_all_nodes_have_same_data(looper, nodes, customTimeout,
exclude_from_check=exclude_from_check)
def ensure_view_change_complete_by_primary_restart(
looper, nodes, tconf, tdirWithPoolTxns, allPluginsPath):
nodes = ensure_view_change_by_primary_restart(
looper, nodes, tconf, tdirWithPoolTxns, allPluginsPath)
ensureElectionsDone(looper=looper, nodes=nodes)
ensure_all_nodes_have_same_data(looper, nodes)
return nodes
def view_change_in_between_3pc(looper, nodes, slow_nodes, wallet, client,
slow_delay=1, wait=None):
send_reqs_to_nodes_and_verify_all_replies(looper, wallet, client, 4)
delay_3pc_messages(slow_nodes, 0, delay=slow_delay)
sendRandomRequests(wallet, client, 10)
if wait:
looper.runFor(wait)
ensure_view_change_complete(looper, nodes, customTimeout=60)
reset_delays_and_process_delayeds(slow_nodes)
sendReqsToNodesAndVerifySuffReplies(
looper, wallet, client, 5, total_timeout=30)
send_reqs_to_nodes_and_verify_all_replies(
looper, wallet, client, 5, total_timeout=30)
def view_change_in_between_3pc_random_delays(
looper,
nodes,
slow_nodes,
wallet,
client,
tconf,
min_delay=0,
max_delay=0):
send_reqs_to_nodes_and_verify_all_replies(looper, wallet, client, 4)
# max delay should not be more than catchup timeout.
max_delay = max_delay or tconf.MIN_TIMEOUT_CATCHUPS_DONE_DURING_VIEW_CHANGE - 1
delay_3pc_messages(slow_nodes, 0, min_delay=min_delay, max_delay=max_delay)
sendRandomRequests(wallet, client, 10)
ensure_view_change_complete(looper,
nodes,
customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT + max_delay,
exclude_from_check=['check_last_ordered_3pc'])
reset_delays_and_process_delayeds(slow_nodes)
send_reqs_to_nodes_and_verify_all_replies(looper, wallet, client, 10)
| 39.871257
| 116
| 0.674476
|
3a8622dd04aff228b6faaba351071af6c838ad1f
| 676
|
py
|
Python
|
manage.py
|
EHoggard/Price-Comparison-Group2-BE
|
03175b360fb20f682730cd7b7f3fc80e2696dc00
|
[
"MIT"
] | 1
|
2021-07-24T14:43:02.000Z
|
2021-07-24T14:43:02.000Z
|
manage.py
|
EHoggard/Price-Comparison-Group2-BE
|
03175b360fb20f682730cd7b7f3fc80e2696dc00
|
[
"MIT"
] | null | null | null |
manage.py
|
EHoggard/Price-Comparison-Group2-BE
|
03175b360fb20f682730cd7b7f3fc80e2696dc00
|
[
"MIT"
] | 16
|
2021-07-15T06:16:25.000Z
|
2021-08-20T06:06:02.000Z
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'price_comparison_gp2.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29.391304
| 84
| 0.684911
|
c4f441e7840f2f88a458bed5be55d025c0ebd6a6
| 6,519
|
py
|
Python
|
test/functional/feature_nulldummy.py
|
pniwre/RavencoinLite
|
5bf2109408ba8128f3b08b7f94424f78891e9475
|
[
"MIT"
] | 85
|
2018-01-28T11:36:04.000Z
|
2022-03-12T01:50:34.000Z
|
test/functional/feature_nulldummy.py
|
pniwre/RavencoinLite
|
5bf2109408ba8128f3b08b7f94424f78891e9475
|
[
"MIT"
] | 42
|
2021-11-11T02:57:44.000Z
|
2022-03-27T21:31:38.000Z
|
test/functional/feature_nulldummy.py
|
pniwre/RavencoinLite
|
5bf2109408ba8128f3b08b7f94424f78891e9475
|
[
"MIT"
] | 39
|
2018-02-24T21:01:54.000Z
|
2021-08-15T16:05:02.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Copyright (c) 2017-2018 The Raven Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test NULLDUMMY softfork.
Connect to a single node.
Generate 2 blocks (save the coinbases for later).
Generate 427 more blocks.
[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in the 430th block.
[Policy] Check that non-NULLDUMMY transactions are rejected before activation.
[Consensus] Check that the new NULLDUMMY rules are not enforced on the 431st block.
[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on the 432nd block.
"""
from test_framework.test_framework import RavenTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block, add_witness_commitment
from test_framework.script import CScript
from io import BytesIO
import time
NULLDUMMY_ERROR = "64: non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)"
def trueDummy(tx):
    # Replace the NULLDUMMY placeholder (the empty first element of the scriptSig)
    # with OP_TRUE (0x51), making the transaction violate the NULLDUMMY rule.
    scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
assert(len(i) == 0)
newscript.append(b'\x51')
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
tx.rehash()
class NULLDUMMYTest(RavenTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [['-whitelist=127.0.0.1', '-walletprematurewitness']]
def run_test(self):
self.address = self.nodes[0].getnewaddress()
self.ms_address = self.nodes[0].addmultisigaddress(1,[self.address])
self.wit_address = self.nodes[0].addwitnessaddress(self.address)
self.wit_ms_address = self.nodes[0].addwitnessaddress(self.ms_address)
NetworkThread().start() # Start up network handling in another thread
self.coinbase_blocks = self.nodes[0].generate(2) # Block 2
coinbase_txid = []
for i in self.coinbase_blocks:
coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
self.nodes[0].generate(427) # Block 429
self.lastblockhash = self.nodes[0].getbestblockhash()
self.tip = int("0x" + self.lastblockhash, 0)
self.lastblockheight = 429
self.lastblocktime = int(time.time()) + 429
self.log.info("Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]")
test1txs = [self.create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, 49)]
txid1 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[0].serialize_with_witness()), True)
test1txs.append(self.create_transaction(self.nodes[0], txid1, self.ms_address, 48))
txid2 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[1].serialize_with_witness()), True)
test1txs.append(self.create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, 49))
txid3 = self.nodes[0].sendrawtransaction(bytes_to_hex_str(test1txs[2].serialize_with_witness()), True)
self.block_submit(self.nodes[0], test1txs, False, True)
self.log.info("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation")
test2tx = self.create_transaction(self.nodes[0], txid2, self.ms_address, 47)
trueDummy(test2tx)
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test2tx.serialize_with_witness()), True)
self.log.info("Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]")
self.block_submit(self.nodes[0], [test2tx], False, True)
self.log.info("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation")
test4tx = self.create_transaction(self.nodes[0], test2tx.hash, self.address, 46)
test6txs=[CTransaction(test4tx)]
trueDummy(test4tx)
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test4tx.serialize_with_witness()), True)
self.block_submit(self.nodes[0], [test4tx])
self.log.info("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation")
test5tx = self.create_transaction(self.nodes[0], txid3, self.wit_address, 48)
test6txs.append(CTransaction(test5tx))
test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01'
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, bytes_to_hex_str(test5tx.serialize_with_witness()), True)
self.block_submit(self.nodes[0], [test5tx], True)
self.log.info("Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [432]")
for i in test6txs:
self.nodes[0].sendrawtransaction(bytes_to_hex_str(i.serialize_with_witness()), True)
self.block_submit(self.nodes[0], test6txs, True, True)
def create_transaction(self, node, txid, to_address, amount):
inputs = [{ "txid" : txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def block_submit(self, node, txs, witness = False, accept = False):
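        # Build a block on top of the current tip containing `txs`, optionally add a
        # segwit commitment, submit it, and assert whether the node accepted it as the
        # new best block.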
block = create_block(self.tip, create_coinbase(self.lastblockheight + 1), self.lastblocktime + 1)
block.nVersion = 4
for tx in txs:
tx.rehash()
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
witness and add_witness_commitment(block)
block.rehash()
block.solve()
node.submitblock(bytes_to_hex_str(block.serialize(True)))
if (accept):
assert_equal(node.getbestblockhash(), block.hash)
self.tip = block.sha256
self.lastblockhash = block.hash
self.lastblocktime += 1
self.lastblockheight += 1
else:
assert_equal(node.getbestblockhash(), self.lastblockhash)
if __name__ == '__main__':
NULLDUMMYTest().main()
| 49.015038
| 145
| 0.701488
|
1d5242613b0f7a66404dce27fa3f3ca8b5b6b43d
| 978
|
py
|
Python
|
projects/memnn_feedback/tasks/dbll_babi/build.py
|
mohan-chinnappan-n/ParlAI
|
ddf10373339390408c92d8765d7ff9ae00e4204e
|
[
"MIT"
] | 258
|
2020-04-10T07:01:06.000Z
|
2022-03-26T11:49:30.000Z
|
projects/memnn_feedback/tasks/dbll_babi/build.py
|
jacklee20151/ParlAI
|
b4a442018d1a46f9374547ef0a9b8134b7bb4944
|
[
"MIT"
] | 33
|
2020-04-10T04:28:51.000Z
|
2022-03-31T02:52:02.000Z
|
projects/memnn_feedback/tasks/dbll_babi/build.py
|
jacklee20151/ParlAI
|
b4a442018d1a46f9374547ef0a9b8134b7bb4944
|
[
"MIT"
] | 43
|
2020-04-14T10:43:33.000Z
|
2022-03-13T02:27:54.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
import os
def build(opt):
dpath = os.path.join(opt['datapath'], 'DBLL')
version = None
if not build_data.built(dpath, version_string=version):
print('[building data: ' + dpath + ']')
if build_data.built(dpath):
# An older version exists, so remove these outdated files.
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
fname = 'dbll.tgz'
url = 'http://parl.ai/downloads/dbll/' + fname
build_data.download(url, dpath, fname)
build_data.untar(dpath, fname)
# Mark the data as built.
build_data.mark_done(dpath, version_string=version)
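# A minimal usage sketch, assuming a scratch download directory; the 'datapath'
# value below is hypothetical (ParlAI normally supplies it via its own options):
#
#   build({'datapath': '/tmp/ParlAI/data'})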
| 31.548387
| 70
| 0.662577
|
2d8adf7cc2bcf10054f2f8879ebdce90e5036976
| 990
|
py
|
Python
|
djangox/lib/python3.8/site-packages/allauth/socialaccount/providers/slack/provider.py
|
DemarcusL/django_wiki_lab
|
3b7cf18af7e0f89c94d10eb953ca018a150a2f55
|
[
"MIT"
] | 6,342
|
2015-01-01T07:40:30.000Z
|
2022-03-31T04:18:30.000Z
|
djangox/lib/python3.8/site-packages/allauth/socialaccount/providers/slack/provider.py
|
DemarcusL/django_wiki_lab
|
3b7cf18af7e0f89c94d10eb953ca018a150a2f55
|
[
"MIT"
] | 2,198
|
2015-01-02T15:17:45.000Z
|
2022-03-28T10:20:43.000Z
|
djangox/lib/python3.8/site-packages/allauth/socialaccount/providers/slack/provider.py
|
DemarcusL/django_wiki_lab
|
3b7cf18af7e0f89c94d10eb953ca018a150a2f55
|
[
"MIT"
] | 2,928
|
2015-01-01T10:44:13.000Z
|
2022-03-31T03:20:16.000Z
|
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class SlackAccount(ProviderAccount):
def get_avatar_url(self):
return self.account.extra_data.get("user").get("image_192", None)
def to_str(self):
dflt = super(SlackAccount, self).to_str()
return "%s (%s)" % (
self.account.extra_data.get("name", ""),
dflt,
)
class SlackProvider(OAuth2Provider):
id = "slack"
name = "Slack"
account_class = SlackAccount
def extract_uid(self, data):
return "%s_%s" % (
str(data.get("team").get("id")),
str(data.get("user").get("id")),
)
def extract_common_fields(self, data):
user = data.get("user", {})
return {"name": user.get("name"), "email": user.get("email", None)}
def get_default_scope(self):
return ["identify"]
provider_classes = [SlackProvider]
| 26.756757
| 75
| 0.616162
|
e4e945f018aabdec9b98d37ef2dcfd3ab4f803ba
| 1,188
|
py
|
Python
|
src/decode/app/web.py
|
emiel/decode-django
|
d887f1cbd78fabb632cc59955163d74ee659c695
|
[
"MIT"
] | null | null | null |
src/decode/app/web.py
|
emiel/decode-django
|
d887f1cbd78fabb632cc59955163d74ee659c695
|
[
"MIT"
] | null | null | null |
src/decode/app/web.py
|
emiel/decode-django
|
d887f1cbd78fabb632cc59955163d74ee659c695
|
[
"MIT"
] | null | null | null |
import re
from django import forms
from django.contrib import messages
from django.core.validators import RegexValidator
from django.shortcuts import render
from .api_client import ApiClient
input_re = re.compile(r"^\d+$")
class DecodeForm(forms.Form):
input_str = forms.CharField(
label="Input", max_length=30, validators=[RegexValidator(regex=input_re)]
)
def _process(input_str, request):
result = []
api_client = ApiClient()
try:
response = api_client.decode(input_str)
except Exception as err:
messages.error(request, str(err))
else:
if response.status_code == 200:
result = response.json()["result"]
elif response.status_code == 400:
messages.info(request, "Invalid input: {}".format(input_str))
return result
def decode(request):
result = []
if request.method == "POST":
form = DecodeForm(request.POST)
if form.is_valid():
input_str = form.cleaned_data["input_str"]
result = _process(input_str, request)
else:
form = DecodeForm()
return render(request, "app/index.html", {"form": form, "result": result})
| 23.294118
| 81
| 0.649832
|
bbdaeeafd6c5c3a74ea79536b50008502e8a7a94
| 17,597
|
py
|
Python
|
tests/client_connectivity/test_vlan_mode.py
|
brennerm/wlan-testing
|
ea99d5ab74177198324f4d7eddcdcff2844bbbf3
|
[
"BSD-3-Clause"
] | null | null | null |
tests/client_connectivity/test_vlan_mode.py
|
brennerm/wlan-testing
|
ea99d5ab74177198324f4d7eddcdcff2844bbbf3
|
[
"BSD-3-Clause"
] | null | null | null |
tests/client_connectivity/test_vlan_mode.py
|
brennerm/wlan-testing
|
ea99d5ab74177198324f4d7eddcdcff2844bbbf3
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
import sys
for folder in 'py-json', 'py-scripts':
if folder not in sys.path:
sys.path.append(f'../lanforge/lanforge-scripts/{folder}')
sys.path.append(f"../lanforge/lanforge-scripts/py-scripts/tip-cicd-something")
sys.path.append(f'../libs')
sys.path.append(f'../libs/lanforge/')
from LANforge.LFUtils import *
from configuration_data import TEST_CASES
if 'py-json' not in sys.path:
sys.path.append('../py-scripts')
import sta_connect2
from sta_connect2 import StaConnect2
import eap_connect
from eap_connect import EAPConnect
import time
@pytest.mark.run(order=25)
@pytest.mark.vlan
class TestVlanModeClientConnectivity(object):
@pytest.mark.wpa
@pytest.mark.twog
def test_client_wpa_2g(self, request, get_lanforge_data, setup_profile_data, instantiate_testrail, instantiate_project):
profile_data = setup_profile_data["VLAN"]["WPA"]["2G"]
station_names = []
for i in range(0, int(request.config.getini("num_stations"))):
station_names.append(get_lanforge_data["lanforge_2dot4g_prefix"] + "0" + str(i))
print(profile_data, get_lanforge_data)
staConnect = StaConnect2(get_lanforge_data["lanforge_ip"], int(get_lanforge_data["lanforge-port-number"]),
debug_=False)
staConnect.sta_mode = 0
staConnect.upstream_resource = 1
staConnect.upstream_port = get_lanforge_data["lanforge_vlan_port"]
staConnect.radio = get_lanforge_data["lanforge_2dot4g"]
staConnect.resource = 1
staConnect.dut_ssid = profile_data["ssid_name"]
staConnect.dut_passwd = profile_data["security_key"]
staConnect.dut_security = "wpa"
staConnect.station_names = station_names
staConnect.sta_prefix = get_lanforge_data["lanforge_2dot4g_prefix"]
staConnect.runtime_secs = 10
staConnect.bringup_time_sec = 60
staConnect.cleanup_on_exit = True
# staConnect.cleanup()
staConnect.setup()
staConnect.start()
print("napping %f sec" % staConnect.runtime_secs)
time.sleep(staConnect.runtime_secs)
staConnect.stop()
staConnect.cleanup()
run_results = staConnect.get_result_list()
for result in run_results:
print("test result: " + result)
# result = 'pass'
print("Single Client Connectivity :", staConnect.passes)
if staConnect.passes():
instantiate_testrail.update_testrail(case_id=TEST_CASES["2g_wpa_vlan"], run_id=instantiate_project,
status_id=1,
msg='2G WPA Client Connectivity Passed successfully - vlan mode')
else:
instantiate_testrail.update_testrail(case_id=TEST_CASES["2g_wpa_vlan"], run_id=instantiate_project,
status_id=5,
msg='2G WPA Client Connectivity Failed - vlan mode')
assert staConnect.passes()
# C2420
@pytest.mark.wpa
@pytest.mark.fiveg
def test_client_wpa_5g(self, request, get_lanforge_data, setup_profile_data, instantiate_project, instantiate_testrail):
profile_data = setup_profile_data["VLAN"]["WPA"]["5G"]
station_names = []
for i in range(0, int(request.config.getini("num_stations"))):
station_names.append(get_lanforge_data["lanforge_5g_prefix"] + "0" + str(i))
staConnect = StaConnect2(get_lanforge_data["lanforge_ip"], int(get_lanforge_data["lanforge-port-number"]),
debug_=False)
staConnect.sta_mode = 0
staConnect.upstream_resource = 1
staConnect.upstream_port = get_lanforge_data["lanforge_vlan_port"]
staConnect.radio = get_lanforge_data["lanforge_5g"]
staConnect.resource = 1
staConnect.dut_ssid = profile_data["ssid_name"]
staConnect.dut_passwd = profile_data["security_key"]
staConnect.dut_security = "wpa"
staConnect.station_names = station_names
staConnect.sta_prefix = get_lanforge_data["lanforge_5g_prefix"]
staConnect.runtime_secs = 10
staConnect.bringup_time_sec = 60
staConnect.cleanup_on_exit = True
# staConnect.cleanup()
staConnect.setup()
staConnect.start()
print("napping %f sec" % staConnect.runtime_secs)
time.sleep(staConnect.runtime_secs)
staConnect.stop()
staConnect.cleanup()
run_results = staConnect.get_result_list()
for result in run_results:
print("test result: " + result)
# result = 'pass'
print("Single Client Connectivity :", staConnect.passes)
if staConnect.passes():
instantiate_testrail.update_testrail(case_id=TEST_CASES["5g_wpa_vlan"], run_id=instantiate_project,
status_id=1,
msg='5G WPA Client Connectivity Passed successfully - vlan mode')
else:
instantiate_testrail.update_testrail(case_id=TEST_CASES["5g_wpa_vlan"], run_id=instantiate_project,
status_id=5,
msg='5G WPA Client Connectivity Failed - vlan mode')
assert staConnect.passes()
# C2419
@pytest.mark.wpa2_personal
@pytest.mark.twog
def test_client_wpa2_personal_2g(self, request, get_lanforge_data, setup_profile_data, instantiate_project, instantiate_testrail):
profile_data = setup_profile_data["VLAN"]["WPA2_P"]["2G"]
station_names = []
for i in range(0, int(request.config.getini("num_stations"))):
station_names.append(get_lanforge_data["lanforge_2dot4g_prefix"] + "0" + str(i))
staConnect = StaConnect2(get_lanforge_data["lanforge_ip"], int(get_lanforge_data["lanforge-port-number"]),
debug_=False)
staConnect.sta_mode = 0
staConnect.upstream_resource = 1
staConnect.upstream_port = get_lanforge_data["lanforge_vlan_port"]
staConnect.radio = get_lanforge_data["lanforge_2dot4g"]
staConnect.resource = 1
staConnect.dut_ssid = profile_data["ssid_name"]
staConnect.dut_passwd = profile_data["security_key"]
staConnect.dut_security = "wpa2"
staConnect.station_names = station_names
staConnect.sta_prefix = get_lanforge_data["lanforge_2dot4g_prefix"]
staConnect.runtime_secs = 10
staConnect.bringup_time_sec = 60
staConnect.cleanup_on_exit = True
# staConnect.cleanup()
staConnect.setup()
staConnect.start()
print("napping %f sec" % staConnect.runtime_secs)
time.sleep(staConnect.runtime_secs)
staConnect.stop()
staConnect.cleanup()
run_results = staConnect.get_result_list()
for result in run_results:
print("test result: " + result)
# result = 'pass'
print("Single Client Connectivity :", staConnect.passes)
if staConnect.passes():
instantiate_testrail.update_testrail(case_id=TEST_CASES["2g_wpa2_vlan"], run_id=instantiate_project,
status_id=1,
msg='2G WPA2 Client Connectivity Passed successfully - vlan mode')
else:
instantiate_testrail.update_testrail(case_id=TEST_CASES["2g_wpa2_vlan"], run_id=instantiate_project,
status_id=5,
msg='2G WPA2 Client Connectivity Failed - vlan mode')
assert staConnect.passes()
# C2237
@pytest.mark.wpa2_personal
@pytest.mark.fiveg
def test_client_wpa2_personal_5g(self, request, get_lanforge_data, setup_profile_data, instantiate_project, instantiate_testrail):
profile_data = setup_profile_data["VLAN"]["WPA2_P"]["5G"]
station_names = []
for i in range(0, int(request.config.getini("num_stations"))):
station_names.append(get_lanforge_data["lanforge_5g_prefix"] + "0" + str(i))
staConnect = StaConnect2(get_lanforge_data["lanforge_ip"], int(get_lanforge_data["lanforge-port-number"]),
debug_=False)
staConnect.sta_mode = 0
staConnect.upstream_resource = 1
staConnect.upstream_port = get_lanforge_data["lanforge_vlan_port"]
staConnect.radio = get_lanforge_data["lanforge_5g"]
staConnect.resource = 1
staConnect.dut_ssid = profile_data["ssid_name"]
staConnect.dut_passwd = profile_data["security_key"]
staConnect.dut_security = "wpa2"
staConnect.station_names = station_names
staConnect.sta_prefix = get_lanforge_data["lanforge_5g_prefix"]
staConnect.runtime_secs = 10
staConnect.bringup_time_sec = 60
staConnect.cleanup_on_exit = True
# staConnect.cleanup()
staConnect.setup()
staConnect.start()
print("napping %f sec" % staConnect.runtime_secs)
time.sleep(staConnect.runtime_secs)
staConnect.stop()
staConnect.cleanup()
run_results = staConnect.get_result_list()
for result in run_results:
print("test result: " + result)
# result = 'pass'
print("Single Client Connectivity :", staConnect.passes)
if staConnect.passes():
instantiate_testrail.update_testrail(case_id=TEST_CASES["5g_wpa2_vlan"], run_id=instantiate_project,
status_id=1,
msg='5G WPA2 Client Connectivity Passed successfully - vlan mode')
else:
instantiate_testrail.update_testrail(case_id=TEST_CASES["5g_wpa2_vlan"], run_id=instantiate_project,
status_id=5,
msg='5G WPA2 Client Connectivity Failed - vlan mode')
assert staConnect.passes()
# C2236
@pytest.mark.wpa2_enterprise
@pytest.mark.twog
def test_client_wpa2_enterprise_2g(self, request, get_lanforge_data, setup_profile_data, instantiate_project, instantiate_testrail):
profile_data = setup_profile_data["VLAN"]["WPA2_E"]["2G"]
station_names = []
for i in range(0, int(request.config.getini("num_stations"))):
station_names.append(get_lanforge_data["lanforge_2dot4g_prefix"] + "0" + str(i))
eap_connect = EAPConnect(get_lanforge_data["lanforge_ip"], get_lanforge_data["lanforge-port-number"])
eap_connect.upstream_resource = 1
eap_connect.upstream_port = get_lanforge_data["lanforge_vlan_port"]
eap_connect.security = "wpa2"
eap_connect.sta_list = station_names
eap_connect.station_names = station_names
eap_connect.sta_prefix = get_lanforge_data["lanforge_2dot4g_prefix"]
eap_connect.ssid = profile_data["ssid_name"]
eap_connect.radio = get_lanforge_data["lanforge_2dot4g"]
eap_connect.eap = "TTLS"
eap_connect.identity = "nolaradius"
eap_connect.ttls_passwd = "nolastart"
eap_connect.runtime_secs = 10
eap_connect.setup()
eap_connect.start()
print("napping %f sec" % eap_connect.runtime_secs)
time.sleep(eap_connect.runtime_secs)
eap_connect.stop()
try:
eap_connect.cleanup()
eap_connect.cleanup()
except:
pass
run_results = eap_connect.get_result_list()
for result in run_results:
print("test result: " + result)
# result = 'pass'
print("Single Client Connectivity :", eap_connect.passes)
if eap_connect.passes():
instantiate_testrail.update_testrail(case_id=TEST_CASES["2g_eap_vlan"], run_id=instantiate_project,
status_id=1,
                                                 msg='2G WPA2 ENTERPRISE Client Connectivity Passed successfully - '
'vlan mode')
else:
instantiate_testrail.update_testrail(case_id=TEST_CASES["2g_eap_vlan"], run_id=instantiate_project,
status_id=5,
                                                 msg='2G WPA2 ENTERPRISE Client Connectivity Failed - vlan mode')
assert eap_connect.passes()
# C5214
@pytest.mark.wpa2_enterprise
@pytest.mark.fiveg
def test_client_wpa2_enterprise_5g(self, request, get_lanforge_data, setup_profile_data, instantiate_project, instantiate_testrail):
profile_data = setup_profile_data["VLAN"]["WPA2_E"]["5G"]
station_names = []
for i in range(0, int(request.config.getini("num_stations"))):
station_names.append(get_lanforge_data["lanforge_5g_prefix"] + "0" + str(i))
eap_connect = EAPConnect(get_lanforge_data["lanforge_ip"], get_lanforge_data["lanforge-port-number"])
eap_connect.upstream_resource = 1
eap_connect.upstream_port = get_lanforge_data["lanforge_vlan_port"]
eap_connect.security = "wpa2"
eap_connect.sta_list = station_names
eap_connect.station_names = station_names
eap_connect.sta_prefix = get_lanforge_data["lanforge_5g_prefix"]
eap_connect.ssid = profile_data["ssid_name"]
eap_connect.radio = get_lanforge_data["lanforge_5g"]
eap_connect.eap = "TTLS"
eap_connect.identity = "nolaradius"
eap_connect.ttls_passwd = "nolastart"
eap_connect.runtime_secs = 10
eap_connect.setup()
eap_connect.start()
print("napping %f sec" % eap_connect.runtime_secs)
time.sleep(eap_connect.runtime_secs)
eap_connect.stop()
try:
eap_connect.cleanup()
eap_connect.cleanup()
except:
pass
run_results = eap_connect.get_result_list()
for result in run_results:
print("test result: " + result)
# result = 'pass'
print("Single Client Connectivity :", eap_connect.passes)
if eap_connect.passes():
instantiate_testrail.update_testrail(case_id=TEST_CASES["5g_eap_vlan"], run_id=instantiate_project,
status_id=1,
msg='5G WPA2 ENTERPRISE Client Connectivity Passed successfully - '
'vlan mode')
else:
instantiate_testrail.update_testrail(case_id=TEST_CASES["5g_eap_vlan"], run_id=instantiate_project,
status_id=5,
msg='5G WPA2 ENTERPRISE Client Connectivity Failed - vlan mode')
assert eap_connect.passes()
@pytest.mark.modify_ssid
@pytest.mark.parametrize(
'update_ssid',
(["VLAN, WPA, 5G, Sanity-updated-5G-WPA-VLAN"]),
indirect=True
)
def test_modify_ssid(self, request, update_ssid, get_lanforge_data, setup_profile_data, instantiate_testrail,
instantiate_project):
profile_data = setup_profile_data["VLAN"]["WPA"]["5G"]
station_names = []
for i in range(0, int(request.config.getini("num_stations"))):
station_names.append(get_lanforge_data["lanforge_5g_prefix"] + "0" + str(i))
staConnect = StaConnect2(get_lanforge_data["lanforge_ip"], int(get_lanforge_data["lanforge-port-number"]),
debug_=False)
staConnect.sta_mode = 0
staConnect.upstream_resource = 1
staConnect.upstream_port = get_lanforge_data["lanforge_bridge_port"]
staConnect.radio = get_lanforge_data["lanforge_5g"]
staConnect.resource = 1
staConnect.dut_ssid = profile_data["ssid_name"]
staConnect.dut_passwd = profile_data["security_key"]
staConnect.dut_security = "wpa"
staConnect.station_names = station_names
staConnect.sta_prefix = get_lanforge_data["lanforge_5g_prefix"]
staConnect.runtime_secs = 10
staConnect.bringup_time_sec = 60
staConnect.cleanup_on_exit = True
# staConnect.cleanup()
staConnect.setup()
staConnect.start()
print("napping %f sec" % staConnect.runtime_secs)
time.sleep(staConnect.runtime_secs)
staConnect.stop()
staConnect.cleanup()
run_results = staConnect.get_result_list()
for result in run_results:
print("test result: " + result)
# result = 'pass'
print("Single Client Connectivity :", staConnect.passes)
if staConnect.passes():
instantiate_testrail.update_testrail(case_id=TEST_CASES["vlan_ssid_update"], run_id=instantiate_project,
status_id=1,
msg='5G WPA Client Connectivity Passed successfully - vlan mode '
'updated ssid')
else:
instantiate_testrail.update_testrail(case_id=TEST_CASES["vlan_ssid_update"], run_id=instantiate_project,
status_id=5,
msg='5G WPA Client Connectivity Failed - vlan mode updated ssid')
assert staConnect.passes()
| 49.991477
| 136
| 0.623345
|
5d62cbc1522749605dad1c85645e8149bc6dabdf
| 3,657
|
py
|
Python
|
start_alphagoose_data_generators_numba.py
|
IsaiahPressman/Kaggle_Hungry_Geese
|
f4d9fcb0811704bd339ad5c7ff937dd0d9e25763
|
[
"MIT"
] | null | null | null |
start_alphagoose_data_generators_numba.py
|
IsaiahPressman/Kaggle_Hungry_Geese
|
f4d9fcb0811704bd339ad5c7ff937dd0d9e25763
|
[
"MIT"
] | null | null | null |
start_alphagoose_data_generators_numba.py
|
IsaiahPressman/Kaggle_Hungry_Geese
|
f4d9fcb0811704bd339ad5c7ff937dd0d9e25763
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import torch
from torch import nn
from hungry_geese.training.alphagoose.alphagoose_data_generator import get_latest_weights_file
from hungry_geese.training.alphagoose.alphagoose_data_generator_numba import start_selfplay_loop
from hungry_geese.env import goose_env as ge
from hungry_geese.nns import models, conv_blocks
if __name__ == '__main__':
device = torch.device('cuda')
obs_type = ge.ObsType.COMBINED_GRADIENT_OBS_SMALL
n_channels = 64
activation = nn.ReLU
normalize = False
use_mhsa = True
model_kwargs = dict(
block_class=conv_blocks.BasicConvolutionalBlock,
block_kwargs=[
dict(
in_channels=obs_type.get_obs_spec()[-3],
out_channels=n_channels,
kernel_size=3,
activation=activation,
normalize=normalize,
use_mhsa=False
),
dict(
in_channels=n_channels,
out_channels=n_channels,
kernel_size=3,
activation=activation,
normalize=normalize,
use_mhsa=False
),
dict(
in_channels=n_channels,
out_channels=n_channels,
kernel_size=3,
activation=activation,
normalize=normalize,
use_mhsa=False
),
dict(
in_channels=n_channels,
out_channels=n_channels,
kernel_size=3,
activation=activation,
normalize=normalize,
use_mhsa=False
),
dict(
in_channels=n_channels,
out_channels=n_channels,
kernel_size=3,
activation=activation,
normalize=normalize,
use_mhsa=False
),
dict(
in_channels=n_channels,
out_channels=n_channels,
kernel_size=3,
activation=activation,
normalize=normalize,
use_mhsa=False
),
dict(
in_channels=n_channels,
out_channels=n_channels,
kernel_size=3,
activation=activation,
normalize=normalize,
use_mhsa=True,
mhsa_heads=4,
),
dict(
in_channels=n_channels,
out_channels=n_channels,
kernel_size=3,
activation=activation,
normalize=normalize,
use_mhsa=use_mhsa,
mhsa_heads=4,
),
],
squeeze_excitation=True,
cross_normalize_value=True,
use_separate_action_value_heads=True,
# **ge.RewardType.RANK_ON_DEATH.get_recommended_value_activation_scale_shift_dict()
)
model = models.FullConvActorCriticNetwork(**model_kwargs)
weights_dir = Path(
'runs/alphagoose/alphagoose_combined_gradient_obs_rank_on_death_lethal_5_blocks_64_dims_v0/all_checkpoints_pt'
)
print(f'Loading initial model weights from: {get_latest_weights_file(weights_dir)}')
dataset_dir = Path('/home/isaiah/data/alphagoose_data')
dataset_dir.mkdir(exist_ok=True)
print(f'Saving self-play data to: {dataset_dir}')
start_selfplay_loop(
model=model,
device=device,
dataset_dir=dataset_dir,
weights_dir=weights_dir,
max_saved_batches=10000,
obs_type=obs_type,
allow_resume=True
)
| 32.362832
| 118
| 0.567405
|
1980596c18d8619bf217274e6e710d4cf2438215
| 9,272
|
py
|
Python
|
tests/services/server_test.py
|
mcunha/forseti-security
|
cbf25f6173c1a25d4e43a9738eca73f927361cb8
|
[
"Apache-2.0"
] | null | null | null |
tests/services/server_test.py
|
mcunha/forseti-security
|
cbf25f6173c1a25d4e43a9738eca73f927361cb8
|
[
"Apache-2.0"
] | null | null | null |
tests/services/server_test.py
|
mcunha/forseti-security
|
cbf25f6173c1a25d4e43a9738eca73f927361cb8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit Tests: Forseti Server."""
import mock
import os
import unittest
from tests.unittest_utils import ForsetiTestCase
from google.cloud.forseti.services import server
class NameSpace(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class ServerTest(ForsetiTestCase):
"""Test Forseti Server."""
TEST_RESOURCE_DIR_PATH = os.path.join(
os.path.dirname(__file__), 'test_data')
@mock.patch('google.cloud.forseti.services.server.ModelManager', mock.MagicMock())
@mock.patch('google.cloud.forseti.services.server.create_engine')
def test_server_config_update_bad_default_path(self, test_patch):
test_patch.return_value = None
config_file_path = ''
db_conn_str = ''
endpoint = ''
config = server.ServiceConfig(config_file_path, db_conn_str, endpoint)
is_success, err_msg = config.update_configuration()
has_err_msg = len(err_msg) > 0
self.assertFalse(is_success)
self.assertTrue(has_err_msg)
@mock.patch('google.cloud.forseti.services.server.ModelManager', mock.MagicMock())
@mock.patch('google.cloud.forseti.services.server.create_engine')
def test_server_config_update_good_default_bad_update_path(self, test_patch):
test_patch.return_value = None
config_file_path = os.path.join(self.TEST_RESOURCE_DIR_PATH,
'forseti_conf_server.yaml')
db_conn_str = ''
endpoint = ''
config = server.ServiceConfig(config_file_path, db_conn_str, endpoint)
is_success, err_msg = config.update_configuration()
has_err_msg = len(err_msg) > 0
self.assertTrue(is_success)
self.assertFalse(has_err_msg)
# Examine the contents in scanner config.
for scanner in config.get_scanner_config().get('scanners'):
# All the scanners are set to true in the default config file.
self.assertTrue(scanner.get('enabled'))
# Test update config with bad file path.
is_success, err_msg = config.update_configuration(
'this_is_a_bad_path.xyz')
has_err_msg = len(err_msg) > 0
self.assertFalse(is_success)
self.assertTrue(has_err_msg)
# Make sure if the new path is bad, we still keep the good changes
# from the default path, we can verify by examining the contents in
# the scanner config and see if it's the same as above.
for scanner in config.get_scanner_config().get('scanners'):
# All the scanners are set to true in the default config file.
self.assertTrue(scanner.get('enabled'))
@mock.patch('google.cloud.forseti.services.server.ModelManager', mock.MagicMock())
@mock.patch('google.cloud.forseti.services.server.create_engine')
def test_server_config_update_good_default_and_update_path(self, test_patch):
test_patch.return_value = None
config_file_path = os.path.join(self.TEST_RESOURCE_DIR_PATH,
'forseti_conf_server.yaml')
db_conn_str = ''
endpoint = ''
config = server.ServiceConfig(config_file_path, db_conn_str, endpoint)
_, _ = config.update_configuration()
new_config_file_path = os.path.join(self.TEST_RESOURCE_DIR_PATH,
'forseti_conf_server_new.yaml')
is_success, err_msg = config.update_configuration(new_config_file_path)
has_err_msg = len(err_msg) > 0
self.assertTrue(is_success)
self.assertFalse(has_err_msg)
# Examine the contents in scanner config.
for scanner in config.get_scanner_config().get('scanners'):
# All the scanners are set to false in the new config file.
self.assertFalse(scanner.get('enabled'))
# Test update again with default path will replace the changes.
is_success, err_msg = config.update_configuration()
has_err_msg = len(err_msg) > 0
self.assertTrue(is_success)
self.assertFalse(has_err_msg)
# Examine the contents in scanner config.
for scanner in config.get_scanner_config().get('scanners'):
# All the scanners are set to true in the default config file.
self.assertTrue(scanner.get('enabled'))
@mock.patch('google.cloud.forseti.services.server.argparse', autospec=True)
def test_services_not_specified(self, mock_argparse):
"""Test main() with no service specified."""
expected_exit_code = 1
mock_arg_parser = mock.MagicMock()
mock_argparse.ArgumentParser.return_value = mock_arg_parser
mock_arg_parser.parse_args.return_value = NameSpace(
endpoint='[::]:50051',
services=None,
forseti_db=None,
config_file_path=None,
log_level='info',
enable_console_log=False)
with self.assertRaises(SystemExit) as e:
server.main()
self.assertEquals(expected_exit_code, e.exception.code)
@mock.patch('google.cloud.forseti.services.server.argparse', autospec=True)
def test_config_file_path_not_specified(self, mock_argparse):
"""Test main() with no config_file_path specified."""
expected_exit_code = 2
mock_arg_parser = mock.MagicMock()
mock_argparse.ArgumentParser.return_value = mock_arg_parser
mock_arg_parser.parse_args.return_value = NameSpace(
endpoint='[::]:50051',
services=['scanner'],
forseti_db=None,
config_file_path=None,
log_level='info',
enable_console_log=False)
with self.assertRaises(SystemExit) as e:
server.main()
self.assertEquals(expected_exit_code, e.exception.code)
@mock.patch('google.cloud.forseti.services.server.argparse', autospec=True)
def test_config_file_path_non_readable_file(self, mock_argparse):
"""Test main() with non-readable config file."""
expected_exit_code = 3
mock_arg_parser = mock.MagicMock()
mock_argparse.ArgumentParser.return_value = mock_arg_parser
mock_arg_parser.parse_args.return_value = NameSpace(
endpoint='[::]:50051',
services=['scanner'],
forseti_db=None,
config_file_path='/this/does/not/exist',
log_level='info',
enable_console_log=False)
with self.assertRaises(SystemExit) as e:
server.main()
self.assertEquals(expected_exit_code, e.exception.code)
@mock.patch('google.cloud.forseti.services.server.argparse', autospec=True)
def test_config_file_path_non_existent_file(self, mock_argparse):
"""Test main() with non-existent config file."""
expected_exit_code = 4
mock_arg_parser = mock.MagicMock()
mock_argparse.ArgumentParser.return_value = mock_arg_parser
mock_arg_parser.parse_args.return_value = NameSpace(
endpoint='[::]:50051',
services=['scanner'],
forseti_db=None,
config_file_path='/what/ever',
log_level='info',
enable_console_log=False)
with mock.patch.object(server.os.path, "isfile") as mock_isfile:
mock_isfile.return_value = True
with mock.patch.object(server.os, "access") as mock_access:
mock_access.return_value = False
with self.assertRaises(SystemExit) as e:
server.main()
self.assertEquals(expected_exit_code, e.exception.code)
@mock.patch('google.cloud.forseti.services.server.argparse', autospec=True)
def test_forseti_db_not_set(self, mock_argparse):
"""Test main() with forseti_db not set."""
expected_exit_code = 5
mock_arg_parser = mock.MagicMock()
mock_argparse.ArgumentParser.return_value = mock_arg_parser
mock_arg_parser.parse_args.return_value = NameSpace(
endpoint='[::]:50051',
services=['scanner'],
forseti_db=None,
config_file_path='/what/ever',
log_level='info',
enable_console_log=False)
with mock.patch.object(server.os.path, "isfile") as mock_isfile:
mock_isfile.return_value = True
with mock.patch.object(server.os, "access") as mock_access:
mock_access.return_value = True
with self.assertRaises(SystemExit) as e:
server.main()
self.assertEquals(expected_exit_code, e.exception.code)
if __name__ == '__main__':
unittest.main()
| 37.844898
| 86
| 0.662209
|
56dfc8cf5d608b930a08bce79192dee9aca79ca6
| 2,500
|
py
|
Python
|
Protheus_WebApp/Modules/SIGAPLS/PLSA269TESTCASE.py
|
98llm/tir-script-samples
|
0bff8393b79356aa562e9e6512c11ee6e039b177
|
[
"MIT"
] | 17
|
2018-09-24T17:27:08.000Z
|
2021-09-16T19:09:46.000Z
|
Protheus_WebApp/Modules/SIGAPLS/PLSA269TESTCASE.py
|
98llm/tir-script-samples
|
0bff8393b79356aa562e9e6512c11ee6e039b177
|
[
"MIT"
] | 4
|
2018-09-24T17:30:32.000Z
|
2022-01-03T11:39:30.000Z
|
Protheus_WebApp/Modules/SIGAPLS/PLSA269TESTCASE.py
|
98llm/tir-script-samples
|
0bff8393b79356aa562e9e6512c11ee6e039b177
|
[
"MIT"
] | 18
|
2019-06-07T17:41:34.000Z
|
2022-01-31T18:17:31.000Z
|
from tir import Webapp
import unittest
from tir.technologies.apw_internal import ApwInternal
import datetime
import time
DateSystem = datetime.datetime.today().strftime('%d/%m/%Y')
DateVal = datetime.datetime(2120, 5, 17)
"""-------------------------------------------------------------------
/*/{Protheus.doc} PLSA269TestCase
TIR - Test cases for the Payment Dates Registration (Cadastro de Datas de Pagamento) routine
@author Silvia SantAnna
@since 11/2020
@version 12
-------------------------------------------------------------------"""
class PLSA269(unittest.TestCase):
@classmethod
def setUpClass(inst):
inst.oHelper = Webapp()
inst.oHelper.Setup("SIGAPLS",DateSystem,"T1","M SP 01","33")
inst.oHelper.Program("PLSA269")
inst.oHelper.AddParameter("MV_PLSDTPG","" , ".T.")
inst.oHelper.SetParameters()
def test_PLSA269_001(self):
        # Incluir (include a new record)
self.oHelper.SetButton("Incluir")
self.oHelper.SetValue("BXT_CODINT","0001", check_value = False)
self.oHelper.SetValue("BXT_ANO","2020")
self.oHelper.SetValue("BXT_MES","11")
self.oHelper.SetValue("BXT_REEMB", "0 - Nao")
# Grid
self.oHelper.ClickGridCell("Data de Pgto", row=1, grid_number=1)
self.oHelper.SetKey("Enter", grid=True, grid_number=1)
self.oHelper.SetValue("BXU_DATPAG","01/12/2020", check_value = False)
self.oHelper.SetButton("Salvar")
self.oHelper.SetButton("Salvar")
self.oHelper.SetButton("Cancelar")
        # Alterar (edit the record)
self.oHelper.SetButton("Alterar")
# Grid
self.oHelper.ClickGridCell("Data de Pgto",row=1, grid_number=1)
self.oHelper.SetKey("Enter", grid=True, grid_number=1)
self.oHelper.SetValue("BXU_DATPAG","01/01/2021", check_value = False)
self.oHelper.SetButton("Ok") # A data não pode ser Sabado, Domingo ou Feriado
self.oHelper.SetValue("BXU_DATPAG","04/01/2021", check_value = False)
self.oHelper.SetButton("Salvar")
self.oHelper.SetButton("Salvar")
        # Visualizar (view the record)
self.oHelper.SetButton("Visualizar")
self.oHelper.CheckResult("BXT_ANO","2020")
self.oHelper.SetButton("Confirmar")
        # Pesquisar (search)
self.oHelper.SetButton("Outras Ações", sub_item='Pesquisar')
self.oHelper.SetButton("Ok")
        # Legenda (legend)
self.oHelper.SetButton("Outras Ações", sub_item='Legenda')
self.oHelper.SetButton("Fechar")
        # Excluir (delete)
self.oHelper.SetButton("Outras Ações", sub_item='Excluir')
self.oHelper.SetButton("Confirmar")
self.oHelper.SetButton('x')
self.oHelper.AssertTrue()
@classmethod
def tearDownClass(inst):
inst.oHelper.TearDown()
if __name__ == '__main__':
unittest.main()
| 30.487805
| 79
| 0.6908
|
44a90007725d100700233aaa48e15e61addc1f3c
| 2,802
|
py
|
Python
|
homeassistant/components/bloomsky.py
|
davidedmundson/home-assistant
|
cd02563552ffc28239fa17c79a5d9bc0013bd5ac
|
[
"MIT"
] | null | null | null |
homeassistant/components/bloomsky.py
|
davidedmundson/home-assistant
|
cd02563552ffc28239fa17c79a5d9bc0013bd5ac
|
[
"MIT"
] | null | null | null |
homeassistant/components/bloomsky.py
|
davidedmundson/home-assistant
|
cd02563552ffc28239fa17c79a5d9bc0013bd5ac
|
[
"MIT"
] | 1
|
2018-11-20T17:44:08.000Z
|
2018-11-20T17:44:08.000Z
|
"""
homeassistant.components.bloomsky
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for BloomSky weather station.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/bloomsky/
"""
import logging
from datetime import timedelta
import requests
from homeassistant.components import discovery
from homeassistant.const import CONF_API_KEY
from homeassistant.helpers import validate_config
from homeassistant.util import Throttle
DOMAIN = "bloomsky"
BLOOMSKY = None
_LOGGER = logging.getLogger(__name__)
# The BloomSky only updates every 5-8 minutes as per the API spec so there's
# no point in polling the API more frequently
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=300)
DISCOVER_SENSORS = 'bloomsky.sensors'
DISCOVER_BINARY_SENSORS = 'bloomsky.binary_sensor'
DISCOVER_CAMERAS = 'bloomsky.camera'
# pylint: disable=unused-argument,too-few-public-methods
def setup(hass, config):
""" Setup BloomSky component. """
if not validate_config(
config,
{DOMAIN: [CONF_API_KEY]},
_LOGGER):
return False
api_key = config[DOMAIN][CONF_API_KEY]
global BLOOMSKY
try:
BLOOMSKY = BloomSky(api_key)
except RuntimeError:
return False
for component, discovery_service in (
('camera', DISCOVER_CAMERAS), ('sensor', DISCOVER_SENSORS),
('binary_sensor', DISCOVER_BINARY_SENSORS)):
discovery.discover(hass, discovery_service, component=component,
hass_config=config)
return True
class BloomSky(object):
""" Handle all communication with the BloomSky API. """
# API documentation at http://weatherlution.com/bloomsky-api/
API_URL = "https://api.bloomsky.com/api/skydata"
def __init__(self, api_key):
self._api_key = api_key
self.devices = {}
_LOGGER.debug("Initial bloomsky device load...")
self.refresh_devices()
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def refresh_devices(self):
"""
        Uses the API to retrieve a list of devices associated with an
account along with all the sensors on the device.
"""
_LOGGER.debug("Fetching bloomsky update")
response = requests.get(self.API_URL,
headers={"Authorization": self._api_key},
timeout=10)
if response.status_code == 401:
raise RuntimeError("Invalid API_KEY")
elif response.status_code != 200:
_LOGGER.error("Invalid HTTP response: %s", response.status_code)
return
# create dictionary keyed off of the device unique id
self.devices.update({
device["DeviceID"]: device for device in response.json()
})
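# A minimal standalone sketch of exercising this helper outside Home Assistant;
# the key is a placeholder and the 'DeviceName' field is assumed to exist in the
# API payload:
#
#   bloomsky = BloomSky('YOUR_BLOOMSKY_API_KEY')
#   for device_id, device in bloomsky.devices.items():
#       print(device_id, device.get('DeviceName'))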
| 30.791209
| 76
| 0.665239
|
b2bb99a3ccadc726e6f0782d81e1db1de70efd72
| 1,882
|
py
|
Python
|
Codebase/List/list_utils.py
|
ryderfang/LeetCode
|
be9470dee6175bab321f0549f173c0c682dddd77
|
[
"MIT"
] | 1
|
2021-05-06T02:27:30.000Z
|
2021-05-06T02:27:30.000Z
|
Codebase/List/list_utils.py
|
ryderfang/LeetCode
|
be9470dee6175bab321f0549f173c0c682dddd77
|
[
"MIT"
] | null | null | null |
Codebase/List/list_utils.py
|
ryderfang/LeetCode
|
be9470dee6175bab321f0549f173c0c682dddd77
|
[
"MIT"
] | null | null | null |
#! /usr/local/bin/python3
from typing import List
from functools import reduce
import itertools
def distinct_nested_list(nested_lst: List[List[int]]):
# array = set(map(lambda x: tuple(x), array))
# array = map(lambda x: list(x), array)
# return list(array)
return [list(y) for y in set([tuple(x) for x in nested_lst])]
def distinct_list(lst: List[int]):
return list(set(lst))
def permute(lst: List[int]):
return [list(x) for x in list(itertools.permutations(lst))]
#! reduce
# def permute(self, nums: List[int]) -> List[List[int]]:
# def reduct_func(a: List[int], b: int):
# tmp = []
# print(a, b)
# for l in a:
# for i in range(len(l) + 1):
# tmp.append(l[:i] + [b] + l[i:])
# return tmp
# ans = reduce(reduct_func, nums, [[]])
# return ans
def sum_list(lst: List[List[int]]):
return sum(lst, [])
if __name__ == "__main__":
nums = [1,2,3]
# enumerate
for i, v in enumerate(nums):
print(i, v)
# filter
print(list(filter(lambda x: x % 2 != 0, nums)))
# map
print(list(map(lambda a: a ** 2, nums)))
# reduce
print(reduce(lambda a, b: a * b, nums))
    # init nested list: do not use [[-1] * 10] * 10 directly, because
    # !!! all ten rows of [[-1] * 10] * 10 are the same list object (shared reference).
nl = [x[:] for x in [[-1] * 10] * 10]
nl[0][0] = 1
#sort
tmp = [[1,2], [3,4], [6, 5]]
tmp.sort(key=lambda x: x[0], reverse=True)
# reverse
print(nums[::-1])
# swap
nums[0], nums[2] = nums[2], nums[0]
# get min/max of list
print(min(nums))
# assign
nums[1:3] = [0] * 2
    # concat
tmp = [3,4,5]
print(nums)
print([*nums, *tmp]) # nums + tmp
print(sum_list([[1], [2,3], [4,5,6]]))
print(distinct_list([1, 1, 2, 3, 4, 4]))
print(distinct_nested_list([[1], [1, 2], [1, 2], [1, 2, 3]]))
| 26.507042
| 65
| 0.538789
|
3a30ac2099e6b02df160d43a48d947592c65928a
| 2,328
|
py
|
Python
|
cvat/apps/authentication/signals.py
|
syonekura/cvat
|
c579ba2319f967f09ab2bf080e357b2bde7180dd
|
[
"MIT"
] | null | null | null |
cvat/apps/authentication/signals.py
|
syonekura/cvat
|
c579ba2319f967f09ab2bf080e357b2bde7180dd
|
[
"MIT"
] | null | null | null |
cvat/apps/authentication/signals.py
|
syonekura/cvat
|
c579ba2319f967f09ab2bf080e357b2bde7180dd
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.conf import settings
from .settings import authentication
from django.contrib.auth.models import User, Group
def setup_group_permissions(group):
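    # Grant this CVAT group the Task-model permissions declared for its role in
    # authentication.cvat_groups_definition; codenames that do not resolve to an
    # existing Permission are silently skipped.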
from cvat.apps.engine.models import Task
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
def append_permissions_for_model(model):
content_type = ContentType.objects.get_for_model(model)
for perm_target, actions in authentication.cvat_groups_definition[group.name]['permissions'].items():
for action in actions:
codename = '{}_{}'.format(action, perm_target)
try:
perm = Permission.objects.get(codename=codename, content_type=content_type)
group_permissions.append(perm)
except:
pass
group_permissions = []
append_permissions_for_model(Task)
group.permissions.set(group_permissions)
group.save()
def create_groups(sender, **kwargs):
for cvat_role, _ in authentication.cvat_groups_definition.items():
Group.objects.get_or_create(name=cvat_role)
def update_ldap_groups(sender, user=None, ldap_user=None, **kwargs):
user_groups = []
for cvat_role, role_settings in authentication.cvat_groups_definition.items():
group_instance, _ = Group.objects.get_or_create(name=cvat_role)
setup_group_permissions(group_instance)
for ldap_group in role_settings['ldap_groups']:
if ldap_group.lower() in ldap_user.group_dns:
user_groups.append(group_instance)
user.save()
user.groups.set(user_groups)
user.is_staff = user.is_superuser = user.groups.filter(name='admin').exists()
def create_user(sender, instance, created, **kwargs):
if instance.is_superuser and instance.is_staff:
admin_group, _ = Group.objects.get_or_create(name='admin')
admin_group.user_set.add(instance)
if created:
for cvat_role, _ in authentication.cvat_groups_definition.items():
group_instance, _ = Group.objects.get_or_create(name=cvat_role)
setup_group_permissions(group_instance)
if cvat_role in authentication.AUTH_SIMPLE_DEFAULT_GROUPS:
instance.groups.add(group_instance)
| 40.137931
| 109
| 0.705326
|
ed52a018299e87ca03331798b4ce7651140bdef4
| 4,186
|
py
|
Python
|
wrappers.py
|
haje01/impala
|
e40384cc5c6ff3a7bae00b4551290d3ffdbbe40a
|
[
"MIT"
] | 18
|
2018-10-12T06:55:07.000Z
|
2021-11-03T01:26:54.000Z
|
wrappers.py
|
haje01/impala
|
e40384cc5c6ff3a7bae00b4551290d3ffdbbe40a
|
[
"MIT"
] | null | null | null |
wrappers.py
|
haje01/impala
|
e40384cc5c6ff3a7bae00b4551290d3ffdbbe40a
|
[
"MIT"
] | 2
|
2018-11-29T06:02:53.000Z
|
2019-07-13T10:55:08.000Z
|
"""Gym wrapper 모듈."""
# from https://github.com/Shmuma/ptan
import cv2
import gym
import gym.spaces
import numpy as np
import collections
class FireResetEnv(gym.Wrapper):
def __init__(self, env=None):
"""For environments where the user need to press FIRE for the game to start."""
super(FireResetEnv, self).__init__(env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def step(self, action):
return self.env.step(action)
def reset(self):
self.env.reset()
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset()
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env=None, skip=4):
"""Return only every `skip`-th frame"""
super(MaxAndSkipEnv, self).__init__(env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = collections.deque(maxlen=2)
self._skip = skip
def step(self, action):
total_reward = 0.0
done = None
for _ in range(self._skip):
obs, reward, done, info = self.env.step(action)
self._obs_buffer.append(obs)
total_reward += reward
if done:
break
max_frame = np.max(np.stack(self._obs_buffer), axis=0)
return max_frame, total_reward, done, info
def reset(self):
"""Clear past frame buffer and init. to first obs. from inner env."""
self._obs_buffer.clear()
obs = self.env.reset()
self._obs_buffer.append(obs)
return obs
class ProcessFrame84(gym.ObservationWrapper):
def __init__(self, env=None):
super(ProcessFrame84, self).__init__(env)
self.observation_space = gym.spaces.Box(low=0, high=255, shape=(84, 84, 1), dtype=np.uint8)
def observation(self, obs):
return ProcessFrame84.process(obs)
@staticmethod
def process(frame):
if frame.size == 210 * 160 * 3:
img = np.reshape(frame, [210, 160, 3]).astype(np.float32)
elif frame.size == 250 * 160 * 3:
img = np.reshape(frame, [250, 160, 3]).astype(np.float32)
else:
assert False, "Unknown resolution."
img = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114
resized_screen = cv2.resize(img, (84, 110), interpolation=cv2.INTER_AREA)
x_t = resized_screen[18:102, :]
x_t = np.reshape(x_t, [84, 84, 1])
return x_t.astype(np.uint8)
class ImageToPyTorch(gym.ObservationWrapper):
def __init__(self, env):
super(ImageToPyTorch, self).__init__(env)
old_shape = self.observation_space.shape
self.observation_space = gym.spaces.Box(low=0.0, high=1.0, shape=(old_shape[-1], old_shape[0], old_shape[1]),
dtype=np.float32)
def observation(self, observation):
return np.moveaxis(observation, 2, 0)
class ScaledFloatFrame(gym.ObservationWrapper):
def observation(self, obs):
return np.array(obs).astype(np.float32) / 255.0
class BufferWrapper(gym.ObservationWrapper):
def __init__(self, env, n_steps, dtype=np.float32):
super(BufferWrapper, self).__init__(env)
self.dtype = dtype
old_space = env.observation_space
self.observation_space = gym.spaces.Box(old_space.low.repeat(n_steps, axis=0),
old_space.high.repeat(n_steps, axis=0), dtype=dtype)
def reset(self):
self.buffer = np.zeros_like(self.observation_space.low, dtype=self.dtype)
return self.observation(self.env.reset())
def observation(self, observation):
self.buffer[:-1] = self.buffer[1:]
self.buffer[-1] = observation
return self.buffer
def make_env(env_name):
env = gym.make(env_name)
env = MaxAndSkipEnv(env)
env = FireResetEnv(env)
env = ProcessFrame84(env)
env = ImageToPyTorch(env)
env = BufferWrapper(env, 4)
return ScaledFloatFrame(env)
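# A minimal usage sketch (the environment id is illustrative; any Atari env whose
# action 1 is FIRE works with FireResetEnv):
#
#   env = make_env("PongNoFrameskip-v4")
#   obs = env.reset()
#   print(obs.shape)  # (4, 84, 84): four stacked 84x84 grayscale frames scaled to [0, 1]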
| 33.488
| 117
| 0.61419
|
60ec88b4c6c28fefbd8937b7bae592407728f4a5
| 2,459
|
py
|
Python
|
lisc/collect/citations.py
|
jasongfleischer/lisc
|
ed30be957d7ce13ccbac51092990869840e6f176
|
[
"Apache-2.0"
] | null | null | null |
lisc/collect/citations.py
|
jasongfleischer/lisc
|
ed30be957d7ce13ccbac51092990869840e6f176
|
[
"Apache-2.0"
] | null | null | null |
lisc/collect/citations.py
|
jasongfleischer/lisc
|
ed30be957d7ce13ccbac51092990869840e6f176
|
[
"Apache-2.0"
] | null | null | null |
"""Collect citation data from OpenCitations."""
import json
from lisc.requester import Requester
from lisc.data.meta_data import MetaData
from lisc.urls.open_citations import OpenCitations
###################################################################################################
###################################################################################################
def collect_citations(dois, util='citations', logging=None, directory=None, verbose=False):
"""Collect citation data from OpenCitations.
Parameters
----------
dois : list of str
DOIs to collect citation data for.
util : {'citations', 'references'}
Which utility to collect citation data with. Options:
* 'citations': collects the number of citations citing the specified DOI.
* 'references': collects the number of references cited by the specified DOI.
logging : {None, 'print', 'store', 'file'}, optional
What kind of logging, if any, to do for requested URLs.
directory : str or SCDB, optional
Folder or database object specifying the save location.
verbose : bool, optional, default: False
Whether to print out updates.
Returns
-------
citations : dict
The number of citations for each DOI.
meta_data : MetaData
Meta data about the data collection.
"""
urls = OpenCitations()
urls.build_url(util)
meta_data = MetaData()
req = Requester(wait_time=0.1, logging=logging, directory=directory)
if verbose:
print('Collecting citation data.')
citations = {doi : get_citation_data(req, urls.get_url(util, [doi])) for doi in dois}
meta_data.add_requester(req)
return citations, meta_data
def get_citation_data(req, citation_url):
"""Extract the number of citations from an OpenCitations URL request.
Parameters
----------
req : Requester
Requester to launch requests from.
citation_url : str
URL to collect citation data from.
Returns
-------
n_citations : int
The number of citations the article has received.
"""
page = req.request_url(citation_url)
n_citations = len(json.loads(page.content.decode('utf-8')))
# If the return is empty, encode as None instead of zero
# This is because we don't want to treat missing data as 0 citations
if n_citations == 0:
n_citations = None
return n_citations
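# A minimal usage sketch (the DOI is illustrative; any DOI indexed by OpenCitations works):
#
#   citations, meta_data = collect_citations(['10.1038/nmeth.2089'], util='citations')
#   print(citations)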
| 30.7375
| 99
| 0.618137
|
c6048075b1320b0c840bfe72de9f147e032d9012
| 4,917
|
py
|
Python
|
image_ops_scratch.py
|
nifetency/python-builtin
|
fb0a10fcd2722f86f8ebb120b3fcedfe083096eb
|
[
"MIT"
] | null | null | null |
image_ops_scratch.py
|
nifetency/python-builtin
|
fb0a10fcd2722f86f8ebb120b3fcedfe083096eb
|
[
"MIT"
] | null | null | null |
image_ops_scratch.py
|
nifetency/python-builtin
|
fb0a10fcd2722f86f8ebb120b3fcedfe083096eb
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
import json
import base64
from matplotlib import pyplot as plt
def read_image_string(contents):
encoded_data = contents[0].split(',')[1]
nparr = np.frombuffer(base64.b64decode(encoded_data), np.uint8)
img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
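# A minimal usage sketch for read_image_string; the data URI is built on the fly
# from a tiny black image, standing in for the string a browser upload would provide:
#
#   ok, buf = cv2.imencode('.png', np.zeros((4, 4, 3), np.uint8))
#   contents = ['data:image/png;base64,' + base64.b64encode(buf).decode()]
#   img = read_image_string(contents)  # 4x4 RGB ndarray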
class ImageOperations(object):
def __init__(self, image_file_src):
self.image_file_src = image_file_src
self.MAX_PIXEL = 255
self.MIN_PIXEL = 0
self.MID_PIXEL = self.MAX_PIXEL // 2
def read_this(self, gray_scale=False):
image_src = self.image_file_src
if gray_scale:
image_src = cv2.cvtColor(image_src, cv2.COLOR_BGR2GRAY)
return image_src
def mirror_this(self, with_plot=False, gray_scale=False):
image_src = self.read_this(gray_scale=gray_scale)
image_mirror = np.fliplr(image_src)
if with_plot:
self.plot_it(orig_matrix=image_src, trans_matrix=image_mirror, head_text='Mirrored', gray_scale=gray_scale)
return None
return image_mirror
def flip_this(self, with_plot=False, gray_scale=False):
image_src = self.read_this(gray_scale=gray_scale)
image_flip = np.flipud(image_src)
if with_plot:
self.plot_it(orig_matrix=image_src, trans_matrix=image_flip, head_text='Flipped', gray_scale=gray_scale)
return None
return image_flip
def equalize_this(self, with_plot=False, gray_scale=False):
image_src = self.read_this(gray_scale=gray_scale)
if not gray_scale:
r_image = image_src[:, :, 0]
g_image = image_src[:, :, 1]
b_image = image_src[:, :, 2]
r_image_eq = cv2.equalizeHist(r_image)
g_image_eq = cv2.equalizeHist(g_image)
b_image_eq = cv2.equalizeHist(b_image)
image_eq = np.dstack(tup=(r_image_eq, g_image_eq, b_image_eq))
else:
image_eq = cv2.equalizeHist(image_src)
if with_plot:
self.plot_it(orig_matrix=image_src, trans_matrix=image_eq, head_text='Equalized', gray_scale=gray_scale)
return None
return image_eq
def convert_binary(self, image_matrix, thresh_val):
color_1 = self.MAX_PIXEL
color_2 = self.MIN_PIXEL
initial_conv = np.where((image_matrix <= thresh_val), image_matrix, color_1)
final_conv = np.where((initial_conv > thresh_val), initial_conv, color_2)
return final_conv
def binarize_this(self, with_plot=False, gray_scale=False, colors=None):
image_src = self.read_this(gray_scale=gray_scale)
image_b = self.convert_binary(image_matrix=image_src, thresh_val=self.MID_PIXEL)
if with_plot:
self.plot_it(orig_matrix=image_src, trans_matrix=image_b, head_text='Binarized', gray_scale=gray_scale)
return None
return image_b
def invert_this(self, with_plot=False, gray_scale=False):
image_src = self.read_this(gray_scale=gray_scale)
image_i = ~ image_src
if with_plot:
self.plot_it(orig_matrix=image_src, trans_matrix=image_i, head_text='Inverted', gray_scale=gray_scale)
return None
return image_i
def solarize_this(self, thresh_val=128, with_plot=False, gray_scale=False):
image_src = self.read_this(gray_scale=gray_scale)
if not gray_scale:
r_image, g_image, b_image = image_src[:, :, 0], image_src[:, :, 1], image_src[:, :, 2]
r_sol = np.where((r_image < thresh_val), r_image, ~r_image)
g_sol = np.where((g_image < thresh_val), g_image, ~g_image)
b_sol = np.where((b_image < thresh_val), b_image, ~b_image)
image_sol = np.dstack(tup=(r_sol, g_sol, b_sol))
else:
image_sol = np.where((image_src < thresh_val), image_src, ~image_src)
if with_plot:
            self.plot_it(orig_matrix=image_src, trans_matrix=image_sol, head_text='Solarized', gray_scale=gray_scale)
return None
return image_sol
def plot_it(self, orig_matrix, trans_matrix, head_text, gray_scale=False):
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(10, 20))
cmap_val = None if not gray_scale else 'gray'
ax1.axis("off")
ax1.title.set_text('Original')
ax2.axis("off")
ax2.title.set_text(head_text)
ax1.imshow(orig_matrix, cmap=cmap_val)
ax2.imshow(trans_matrix, cmap=cmap_val)
plt.show()
return True
if __name__ == '__main__':
image = cv2.imread('lena_original.png', 1)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
imo = ImageOperations(image_file_src=image)
imo.binarize_this(with_plot=True, gray_scale=False)
| 38.116279
| 119
| 0.649583
|
8e63b3f8e9c366ed51730bbefd84d6f991ecdb74
| 11,560
|
py
|
Python
|
venv/lib/python2.7/site-packages/ansible/modules/network/cnos/cnos_config.py
|
haind27/test01
|
7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852
|
[
"MIT"
] | 37
|
2017-08-15T15:02:43.000Z
|
2021-07-23T03:44:31.000Z
|
venv/lib/python2.7/site-packages/ansible/modules/network/cnos/cnos_config.py
|
haind27/test01
|
7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852
|
[
"MIT"
] | 12
|
2018-01-10T05:25:25.000Z
|
2021-11-28T06:55:48.000Z
|
venv/lib/python2.7/site-packages/ansible/modules/network/cnos/cnos_config.py
|
haind27/test01
|
7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852
|
[
"MIT"
] | 49
|
2017-08-15T09:52:13.000Z
|
2022-03-21T17:11:54.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (C) 2017 Red Hat Inc.
# Copyright (C) 2017 Lenovo.
#
# GNU General Public License v3.0+
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Module to configure Lenovo Switches.
# Lenovo Networking
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: cnos_config
version_added: "2.6"
author: "Anil Kumar Muraleedharan (@amuraleedhar)"
short_description: Manage Lenovo CNOS configuration sections
description:
- Lenovo CNOS configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with CNOS configuration sections in
a deterministic way.
notes:
- Tested against CNOS 10.8.0.42
options:
provider:
version_added: "2.6"
description:
- A dict object containing connection details.
suboptions:
host:
description:
- Specifies the DNS host name or address for connecting to the remote
device over the specified transport. The value of host is used as
the destination address for the transport.
required: true
port:
description:
- Specifies the port to use when building the connection to the remote device.
default: 22
username:
description:
- Configures the username to use to authenticate the connection to
the remote device. This value is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
password:
description:
- Specifies the password to use to authenticate the connection to
the remote device. This value is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
timeout:
description:
- Specifies the timeout in seconds for communicating with the network device
for either connecting or sending commands. If the timeout is
exceeded before the operation is completed, the module will error.
default: 10
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to
the remote device. This value is the path to the
key used to authenticate the SSH session. If the value is not specified
in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
will be used instead.
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
src:
description:
- Specifies the source path to the file that contains the configuration
or configuration template to load. The path to the source file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root directory. This argument is
mutually exclusive with I(lines), I(parents).
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
default: line
choices: ['line', 'block', 'config']
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
type: bool
default: 'no'
comment:
description:
- Allows a commit description to be specified to be included
when the configuration is committed. If the configuration is
not changed or committed, this argument is ignored.
default: 'configured by cnos_config'
admin:
description:
- Enters into administration configuration mode for making config
changes to the device.
type: bool
default: 'no'
"""
EXAMPLES = """
Tasks: The following are examples of using the module cnos_config.
---
- name: configure top level configuration
cnos_config:
"lines: hostname {{ inventory_hostname }}"
- name: configure interface settings
cnos_config:
lines:
- enable
- ip ospf enable
parents: interface ip 13
- name: load a config from disk and replace the current config
cnos_config:
src: config.cfg
backup: yes
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: Only when lines is specified.
type: list
sample: ['...', '...']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: string
sample: /playbooks/ansible/backup/cnos01.2016-07-16@22:28:34
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cnos.cnos import load_config, get_config
from ansible.module_utils.network.cnos.cnos import cnos_argument_spec
from ansible.module_utils.network.cnos.cnos import check_args
from ansible.module_utils.network.common.config import NetworkConfig, dumps
DEFAULT_COMMIT_COMMENT = 'configured by cnos_config'
def get_running_config(module):
contents = module.params['config']
if not contents:
contents = get_config(module)
return NetworkConfig(indent=1, contents=contents)
def get_candidate(module):
candidate = NetworkConfig(indent=1)
if module.params['src']:
candidate.load(module.params['src'])
elif module.params['lines']:
parents = module.params['parents'] or list()
candidate.add(module.params['lines'], parents=parents)
return candidate
def run(module, result):
match = module.params['match']
replace = module.params['replace']
replace_config = replace == 'config'
path = module.params['parents']
comment = module.params['comment']
admin = module.params['admin']
check_mode = module.check_mode
candidate = get_candidate(module)
if match != 'none' and replace != 'config':
contents = get_running_config(module)
configobj = NetworkConfig(contents=contents, indent=1)
commands = candidate.difference(configobj, path=path, match=match,
replace=replace)
else:
commands = candidate.items
if commands:
commands = dumps(commands, 'commands').split('\n')
if any((module.params['lines'], module.params['src'])):
if module.params['before']:
commands[:0] = module.params['before']
if module.params['after']:
commands.extend(module.params['after'])
result['commands'] = commands
diff = load_config(module, commands)
if diff:
result['diff'] = dict(prepared=diff)
result['changed'] = True
def main():
"""main entry point for module execution
"""
argument_spec = dict(
src=dict(type='path'),
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
before=dict(type='list'),
after=dict(type='list'),
match=dict(default='line', choices=['line', 'strict',
'exact', 'none']),
replace=dict(default='line', choices=['line', 'block', 'config']),
config=dict(),
backup=dict(type='bool', default=False),
comment=dict(default=DEFAULT_COMMIT_COMMENT),
admin=dict(type='bool', default=False)
)
argument_spec.update(cnos_argument_spec)
mutually_exclusive = [('lines', 'src'),
('parents', 'src')]
required_if = [('match', 'strict', ['lines']),
('match', 'exact', ['lines']),
('replace', 'block', ['lines']),
('replace', 'config', ['src'])]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
required_if=required_if,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = dict(changed=False, warnings=warnings)
if module.params['backup']:
result['__backup__'] = get_config(module)
run(module, result)
module.exit_json(**result)
if __name__ == '__main__':
main()
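# --- Illustrative sketch (not from the original module): run() above pushes only the
# candidate lines that differ from the running config. A dependency-free approximation
# of that matching for plain top-level commands (no parents) might look like this:
def _naive_line_diff(candidate_lines, running_lines):
    """Return candidate commands that are absent from the running configuration."""
    running = set(line.strip() for line in running_lines)
    return [line for line in candidate_lines if line.strip() not in running]

# Example: only 'hostname leaf-01' would be pushed.
# _naive_line_diff(['hostname leaf-01', 'feature telnet'], ['feature telnet'])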
| 36.352201
| 88
| 0.662284
|
9258cd96cfee2593f12b6129be488987622156ea
| 244
|
py
|
Python
|
src/test/data/pa3/sample/str_cmp.py
|
Leo-Enrique-Wu/chocopy_compiler_code_generation
|
4606be0531b3de77411572aae98f73169f46b3b9
|
[
"BSD-2-Clause"
] | 7
|
2021-08-28T18:20:45.000Z
|
2022-02-01T07:35:59.000Z
|
src/test/data/pa3/sample/str_cmp.py
|
Leo-Enrique-Wu/chocopy_compiler_code_generation
|
4606be0531b3de77411572aae98f73169f46b3b9
|
[
"BSD-2-Clause"
] | 4
|
2020-05-18T01:06:15.000Z
|
2020-06-12T19:33:14.000Z
|
src/test/data/pa3/sample/str_cmp.py
|
Leo-Enrique-Wu/chocopy_compiler_code_generation
|
4606be0531b3de77411572aae98f73169f46b3b9
|
[
"BSD-2-Clause"
] | 5
|
2019-11-27T05:11:05.000Z
|
2021-06-29T14:31:14.000Z
|
a:str = "Hello"
b:str = "World"
c:str = "ChocoPy"
def eq(a:str, b:str) -> bool:
return a == b
def neq(a:str, b:str) -> bool:
return a != b
print(eq(a,a))
print(eq(a,b))
print(neq(a,b))
print(neq(b,b))
print(eq(c,a))
print(neq(c,b))
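# Expected output of the comparisons above, one result per line:
# True, False, True, False, False, True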
| 13.555556
| 30
| 0.561475
|
aef440217473b11ff70ce38e5a327457587adc70
| 19,102
|
py
|
Python
|
src/serialbox-python/compare/compare.py
|
elsagermann/serialbox
|
c590561d0876f3ce9a07878e4862a46003a37879
|
[
"BSD-2-Clause"
] | null | null | null |
src/serialbox-python/compare/compare.py
|
elsagermann/serialbox
|
c590561d0876f3ce9a07878e4862a46003a37879
|
[
"BSD-2-Clause"
] | null | null | null |
src/serialbox-python/compare/compare.py
|
elsagermann/serialbox
|
c590561d0876f3ce9a07878e4862a46003a37879
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##===-----------------------------------------------------------------------------*- Python -*-===##
##
## S E R I A L B O X
##
## This file is distributed under terms of BSD license.
## See LICENSE.txt for more information.
##
##===------------------------------------------------------------------------------------------===##
##
## Compares Serialbox fields in FILE_1 and FILE_2.
## FILE_1 and FILE_2 can be .dat or .json files, but need to be of the same type. When they are
## .json files, all fields are compared. When they are .dat files, the field name has to be the
## last part of the file name, separated by '_'.
##
##===------------------------------------------------------------------------------------------===##
from __future__ import print_function
from sys import exit, stderr, version_info
# Check Python version
if version_info < (3, 4):
from platform import python_version
print("compare: error: compare requires at least python 3.4 (detected %s)" % python_version(),
file=stderr)
exit(1)
from argparse import ArgumentParser
from os import path
from sys import path as sys_path
from time import time
from enum import Enum
import numpy as np
from math import isnan
import re
# Find the Serialbox python module
sys_path.insert(1, path.join(path.dirname(path.realpath(__file__)), "../"))
import serialbox as ser
class Config(object):
""" Global configuration """
def __init__(self):
# Use colored output?
self.USE_COLOR = True
# Maximum number of errors to report per field
self.MAX_ERRORS = 10
# Tolerance used for field comparison
self.TOL = 1e-12
# Tolerance used per field for field comparison
self.TOLS = dict()
        # Regex used to select which savepoints are compared
self.SAVEPOINT_REGEX = ""
        # Treat mismatches in the metainfo of fields as warnings (mismatches in dimensions are
# still errors)
self.FIELD_INFO_MISMATCH_WARN = False
# Only compare field meta-info (no data comparison)
self.FIELD_INFO_ONLY = False
def tol(self, field = None):
""" Get tolerance """
return self.TOLS.get(field, self.TOL)
g_config = Config()
def read_tolerances(filename):
""" Read tolerance dictionary from JSON """
import json
with open(filename, 'r') as f:
tols = json.load(f)
return tols
def get_config():
""" Access the global configuration """
global g_config
return g_config
def fatal_error(msg):
""" Report an error message and exit with 1 """
print("compare: error: {}".format(msg), file=stderr)
exit(1)
class Alignment(Enum):
""" Alignment options used in report """
LEFT = '<'
CENTER = '^'
RIGHT = '>'
class Color(Enum):
""" Colors used in report
https://stackoverflow.com/questions/15580303/python-output-complex-line-with-floats-colored-by-value
"""
GREEN = '\033[92m'
RED = '\033[91m'
BOLD_WHITE = '\033[1;93m'
RESET = '\033[0m'
def report(prefix, msg, alignment=Alignment.CENTER, color=Color.GREEN):
""" Report a msg to stdout """
fmt_str = "[{:" + alignment.value + "10}]"
if get_config().USE_COLOR:
fmt_str = color.value + fmt_str + color.RESET.value
fmt_str += " {}"
print(fmt_str.format(prefix, msg))
def make_serializer_and_extract_field(file):
""" Create a Serializer of the archive pointed to by file and extract the field name
If file is set to '/home/foo/bar_SomeField.dat' we will open a Serializer in
'/home/foo' with the prefix 'bar' and return 'SomeField' to be used for comparison. If file
is '/home/foo/MetaData-bar.json' we will return None for field and compare every field in
the archive.
"""
file = path.abspath(file)
directory = path.dirname(file)
basename = path.basename(file)
filename, extension = path.splitext(basename)
if extension == ".json":
# New Serialbox archive 'MetaData-prefix.json'
if "MetaData-" in filename:
prefix = filename[len("MetaData-"):]
# Old Serialbox archive 'prefix.json'
else:
prefix = filename
field = None
else:
first_underscore = filename.find("_")
if first_underscore == -1:
fatal_error(
"cannot extract archive prefix from field file '{}': missing '_'".format(file))
prefix = filename[:first_underscore]
field = filename[first_underscore + 1:]
serializer = None
try:
serializer = ser.Serializer(ser.OpenModeKind.Read, directory, prefix)
except ser.SerialboxError as e:
fatal_error(e)
return serializer, field
def compare_infos(serializers, field):
""" Compare the field-meta infos of field_1 to field_2 and return True on equality """
serializer_1, serializer_2 = serializers
info_1, info_2 = serializer_1.get_field_metainfo(field), serializer_2.get_field_metainfo(field)
if info_1 == info_2:
return True
hard_error = False
# Compare type and dimensions
for attrname in ["type", "dims"]:
attr_1, attr_2 = getattr(info_1, attrname), getattr(info_2, attrname)
if attr_1 != attr_2:
print(
" Field meta-info mismatch: {}\n Expected: {}\n Actual: {}".format(
attrname,
attr_1,
attr_2))
hard_error = True
if hard_error:
return False
# Compare metainfo
metainfo_1 = info_1.metainfo.to_dict()
metainfo_2 = info_2.metainfo.to_dict()
diff = dict(set(metainfo_2.items()) - set(metainfo_1.items()))
if diff:
hard_error = False if get_config().FIELD_INFO_MISMATCH_WARN else True
list_1, list_2 = "", ""
for key, value in sorted(metainfo_1.items()):
list_1 += "{}{}: {}\n".format(6 * " ", key, value)
for key, value in sorted(metainfo_2.items()):
use_color = False
if get_config().USE_COLOR:
use_color = key in diff
list_2 += "{}{}{}: {}{}\n".format(Color.BOLD_WHITE.value if use_color else "",
6 * " ", key,
value, Color.RESET.value if use_color else "")
print(
" Field meta-info mismatch: meta-info\n Expected:\n{}\n Actual:\n{}".format(
list_1, list_2))
return not hard_error
def compare_fields(serializers, field, savepoint, dim_bounds):
""" If field is not None compare the field at each savepoint if field is None compare every
field at every savepoint (full archive comparison).
"""
serializer_1, serializer_2 = serializers
field_1 = serializer_1.read(field, savepoint)
field_2 = serializer_2.read(field, savepoint)
dims = serializer_1.get_field_metainfo(field).dims
if len(dims) > len(dim_bounds):
print(" Field dimension '{}' exceeds maximum of 4 dimension")
return False
slices = []
for i in range(0, len(dims)):
slices += [dim_bounds[i]]
# Get a view of a field incorporating the user defined slices
field_view_1 = field_1[slices]
field_view_2 = field_2[slices]
assert field_view_1.size == field_view_2.size
errors = []
num_nans = 0
max_abs_error = 0
max_rel_error = 0
tol = get_config().tol(field)
it_1 = np.nditer(field_view_1, flags=['multi_index'])
it_2 = np.nditer(field_view_2, flags=['multi_index'])
# Iterate the fields
while not it_1.finished and not it_2.finished:
value_1, value_2 = it_1[0], it_2[0]
value_1_isnan, value_2_isnan = isnan(value_1), isnan(value_2)
# Check for NaN values
num_nans += value_1_isnan + value_2_isnan
if value_1_isnan != value_2_isnan:
errors += [
{"index": it_1.multi_index, "value_1": value_1, "value_2": value_2,
"error": float('nan')}]
# Compute error
else:
if(value_1.dtype == 'bool'):
if(value_1 != value_2):
errors += [
{"index": it_1.multi_index, "value_1": value_1, "value_2": value_2,
"error": 1.0}]
else:
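                # Use a relative error when |value_2| > 1, an absolute error otherwise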
abs_error = abs(value_2 - value_1)
rel_error = abs((value_2 - value_1) / value_2) if abs(value_2) > 1.0 else 0
err = rel_error if abs(value_2) > 1.0 else abs_error
# Check error
if err > tol:
errors += [
{"index": it_1.multi_index, "value_1": value_1, "value_2": value_2,
"error": err}]
max_abs_error = max(max_abs_error, abs_error)
max_rel_error = max(max_rel_error, rel_error)
it_1.iternext()
it_2.iternext()
# All good!
if len(errors) == 0:
return True
# Report the errors
num_errors = len(errors)
num_errors_displayed = min(get_config().MAX_ERRORS, num_errors)
if num_errors_displayed > 0:
print(" Failed values (displayed {} of {}):".format(num_errors_displayed, num_errors))
for idx in range(0, num_errors_displayed):
print(" {}: value_1 = {:.10f}, value_2 = {:.10f}, error = {:.10e}".format(
errors[idx]["index"], float(errors[idx]["value_1"]), float(errors[idx]["value_2"]),
float(errors[idx]["error"])))
print(" Number of errors: {}".format(num_errors))
print(" Number of NaN: {}".format(num_nans))
print(" Percentage of errors: {:.2f} %".format(100 * num_errors / field_view_1.size))
print(" Maximum absolute error: {:.10e}".format(max_abs_error))
print(" Maximum relative error: {:.10e}".format(max_rel_error))
return False
def compare(serializers, field_to_check, dim_bounds):
""" Compare the data and info at every savepoint of field_1 to field_2 and returns
True on success
"""
serializer_1, serializer_2 = serializers
report(10 * "=", "Set-up serializers.")
report("", " serializer_1 = '{}' (prefix '{}')".format(serializer_1.directory,
serializer_1.prefix))
report("", " serializer_2 = '{}' (prefix '{}')".format(serializer_2.directory,
serializer_2.prefix))
num_comparison = 0
failures = []
start_time = time()
# Compute elapsed time in ms
def get_time(start):
return int(1000 * (time() - start))
    # Empty regex string will match every savepoint -> regex will be set to None
savepoint_regex = None if get_config().SAVEPOINT_REGEX == "" else re.compile(
get_config().SAVEPOINT_REGEX)
for savepoint in serializer_1.savepoint_list():
# Savepoint not present in both serializers -> skip
if not serializer_2.has_savepoint(savepoint):
continue
# Do we check this savepoint?
if savepoint_regex is not None and not savepoint_regex.match(savepoint.name):
continue
# Find the intersection of the fields at this savepoint
fields_at_savepoint_1 = serializer_1.fields_at_savepoint(savepoint)
fields_at_savepoint_2 = serializer_2.fields_at_savepoint(savepoint)
fields_at_savepoint = list(set(fields_at_savepoint_1).intersection(fields_at_savepoint_2))
# If field_to_check is None, we always check all fields at the savepoint
if field_to_check is None:
fields_to_check = fields_at_savepoint
else:
fields_to_check = [field_to_check] if field_to_check in fields_at_savepoint else []
savepoint_start_time = time()
if fields_to_check:
report(10 * "-", "Savepoint {}".format(savepoint))
# Check field(s) at the savepoint
for field in fields_to_check:
field_start_time = time()
num_comparison += 1
report(" RUN", "{}".format(field), Alignment.LEFT)
# Check the field info of the field
if not compare_infos(serializers, field):
failures += [{"savepoint": str(savepoint), "field": field}]
report("FAILED", "{} ({} ms) ".format(field, get_time(field_start_time)),
color=Color.RED)
else:
# Compare the data of the fields
if not get_config().FIELD_INFO_ONLY and not compare_fields(serializers, field,
savepoint, dim_bounds):
failures += [{"savepoint": str(savepoint), "field": field}]
report("FAILED",
"{} ({} ms) ".format(field, get_time(field_start_time)),
color=Color.RED)
report("OK ", "{} ({} ms) ".format(field, get_time(field_start_time)),
Alignment.RIGHT)
if fields_to_check:
report(10 * "-",
"Savepoint {} ({} ms total)".format(savepoint.name,
get_time(savepoint_start_time)))
report(10 * "=",
"{} comparisons ran. ({} ms total)".format(num_comparison, get_time(start_time)))
print("")
report(10 * "=", "Tear-down serializers.")
num_failures = len(failures)
num_success = num_comparison - num_failures
if num_success > 0:
report("PASSED", "{} comparisons.".format(num_success))
if num_failures:
report("FAILED", "{} comparisons.".format(num_failures), color=Color.RED)
for failure in failures:
report("FAILED", "{} at Savepoint {}".format(failure["field"], failure["savepoint"]),
color=Color.RED)
return 1 if num_failures else 0
def main(arguments=None):
parser = ArgumentParser(
description=
"""
Compares Serialbox fields in FILE_1 and FILE_2.
FILE_1 and FILE_2 can be .dat or .json files, but need to be of the same type. When they are
.json files, all fields are compared. When they are .dat files, the field name has to be the
last part of the file name, separated by '_'.
"""
)
parser.add_argument('FILE_1', help="Path to a field file (.dat) or archive (.json)", nargs=1,
type=str)
parser.add_argument('FILE_2', help="Path to a field file (.dat) or archive (.json)", nargs=1,
type=str)
parser.add_argument("--version", action="version",
version="compare (Serialbox {})".format(ser.__version__),
help="show version information and exit")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true",
help="enable verbose logging")
parser.add_argument("--no-color", dest="no_color", action="store_true",
help="disabled colored output (default: {})".format(
not get_config().USE_COLOR))
parser.add_argument("-m", "--max-errors", dest="max_errors", metavar="NUM",
default=get_config().MAX_ERRORS,
type=int,
help="report up to 'NUM' errors per field (default: {})".format(
get_config().MAX_ERRORS))
parser.add_argument("-w", "--info-warn", dest="field_info_mismatch_warn", action='store_true',
help="treat mismatches in the metainfo of fields as warnings (mismatches "
"in dimensions and data type are still errors) (default: {})".format(
get_config().FIELD_INFO_MISMATCH_WARN))
parser.add_argument("-s", "--savepoint-filter", dest="savepoint_regex", metavar="REGEX",
default=get_config().SAVEPOINT_REGEX, type=str,
help="only compare fields of savepoints whose name matches REGEX. An "
"empty string will match every savepoint (default: \"{}\")".format(
get_config().SAVEPOINT_REGEX))
parser.add_argument("-t", "--tolerance", dest="tolerance", metavar="TOL",
default=get_config().TOL,
help="set the tolerance used for comparison to 'TOL' (default : {})".format(
get_config().TOL))
parser.add_argument("-T", "--tolerance-json", dest="tolerance_file", metavar="TOLERANCE_FILE",
default=None,
help="set the JSON file for per field tolerance used for comparison")
parser.add_argument("-q", "--info-only", dest="field_info_only", action="store_true",
help="only compare field meta-info (no data comparison) "
"(default: {})".format(get_config().FIELD_INFO_ONLY))
for dim in ["i", "j", "k", "l"]:
parser.add_argument("-{}".format(dim), dest="{}".format(dim), metavar="START[:END]",
help="only compare the {} dimension 'START' or if 'END' is supplied "
"compare in the range ['START', 'END']".format(dim))
args = parser.parse_args(arguments) if arguments else parser.parse_args()
if args.verbose:
ser.Logging.enable()
get_config().USE_COLOR = not args.no_color
get_config().FIELD_INFO_MISMATCH_WARN = args.field_info_mismatch_warn
get_config().FIELD_INFO_ONLY = args.field_info_only
get_config().MAX_ERRORS = args.max_errors
get_config().SAVEPOINT_REGEX = args.savepoint_regex
get_config().TOL = float(args.tolerance)
if args.tolerance_file is not None:
get_config().TOLS = read_tolerances(args.tolerance_file)
path_1, path_2 = (args.FILE_1[0], args.FILE_2[0])
# Check paths exists
for file in [path_1, path_2]:
if not path.exists(file):
fatal_error("file '{}' does not exist".format(file))
# Extract bounds and create the slices
dim_bounds = []
for dim in ["i", "j", "k", "l"]:
if getattr(args, dim):
bound = getattr(args, dim).split(":")
dim_bounds += [slice(bound) if len(bound) == 1 else slice(bound[0], bound[1])]
else:
dim_bounds += [slice(None)]
# Open archives and create read-only serializers
serializer_1, field_1 = make_serializer_and_extract_field(path_1)
serializer_2, field_2 = make_serializer_and_extract_field(path_2)
if field_1 != field_2:
fatal_error("field_1 '{}' is not equal to field_2 '{}'".format(field_1, field_2))
# Perform comparison
ret = 1
try:
ret = compare([serializer_1, serializer_2], field_1, dim_bounds)
except ser.SerialboxError as e:
fatal_error(e)
return ret
if __name__ == '__main__':
exit(main())
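    # --- Usage sketch (illustrative, not from the original source): main() also accepts an
    # explicit argument list, e.g. (hypothetical archive paths):
    # main(["run_ref/MetaData-Field.json", "run_new/MetaData-Field.json",
    #       "--tolerance", "1e-9", "--max-errors", "5"])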
| 37.976143
| 104
| 0.584756
|
295e676da90513c1a432cf71261a2fbe9f393c9e
| 4,404
|
py
|
Python
|
lib/kubernetes/client/models/v2beta2_external_metric_source.py
|
splunkenizer/splunk_as_a_service_app
|
97c4aaf927d2171bf131126cf9b70489ac75bc5a
|
[
"Apache-2.0"
] | 7
|
2019-12-21T00:14:14.000Z
|
2021-03-11T14:51:37.000Z
|
lib/kubernetes/client/models/v2beta2_external_metric_source.py
|
splunkenizer/splunk_as_a_service_app
|
97c4aaf927d2171bf131126cf9b70489ac75bc5a
|
[
"Apache-2.0"
] | 29
|
2019-10-09T11:16:21.000Z
|
2020-06-23T09:32:09.000Z
|
lib/kubernetes/client/models/v2beta2_external_metric_source.py
|
splunkenizer/splunk_as_a_service_app
|
97c4aaf927d2171bf131126cf9b70489ac75bc5a
|
[
"Apache-2.0"
] | 1
|
2021-05-07T10:13:31.000Z
|
2021-05-07T10:13:31.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V2beta2ExternalMetricSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'metric': 'V2beta2MetricIdentifier',
'target': 'V2beta2MetricTarget'
}
attribute_map = {
'metric': 'metric',
'target': 'target'
}
def __init__(self, metric=None, target=None):
"""
V2beta2ExternalMetricSource - a model defined in Swagger
"""
self._metric = None
self._target = None
self.discriminator = None
self.metric = metric
self.target = target
@property
def metric(self):
"""
Gets the metric of this V2beta2ExternalMetricSource.
metric identifies the target metric by name and selector
:return: The metric of this V2beta2ExternalMetricSource.
:rtype: V2beta2MetricIdentifier
"""
return self._metric
@metric.setter
def metric(self, metric):
"""
Sets the metric of this V2beta2ExternalMetricSource.
metric identifies the target metric by name and selector
:param metric: The metric of this V2beta2ExternalMetricSource.
:type: V2beta2MetricIdentifier
"""
if metric is None:
raise ValueError("Invalid value for `metric`, must not be `None`")
self._metric = metric
@property
def target(self):
"""
Gets the target of this V2beta2ExternalMetricSource.
target specifies the target value for the given metric
:return: The target of this V2beta2ExternalMetricSource.
:rtype: V2beta2MetricTarget
"""
return self._target
@target.setter
def target(self, target):
"""
Sets the target of this V2beta2ExternalMetricSource.
target specifies the target value for the given metric
:param target: The target of this V2beta2ExternalMetricSource.
:type: V2beta2MetricTarget
"""
if target is None:
raise ValueError("Invalid value for `target`, must not be `None`")
self._target = target
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V2beta2ExternalMetricSource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
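# --- Usage sketch (illustrative, not from the generated file): constructing and serializing
# the model, assuming V2beta2MetricIdentifier and V2beta2MetricTarget come from the same
# generated package and accept the keyword arguments shown.
# from kubernetes.client.models import V2beta2MetricIdentifier, V2beta2MetricTarget
# source = V2beta2ExternalMetricSource(
#     metric=V2beta2MetricIdentifier(name="queue_depth"),
#     target=V2beta2MetricTarget(type="AverageValue", average_value="30"),
# )
# print(source.to_dict())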
| 28.050955
| 106
| 0.556085
|
2f3b9e0a09485c4d0581cb2e100fe153f2d1b844
| 3,139
|
py
|
Python
|
proxypool/db.py
|
kingkap/ProxyPool
|
0908ed91dde8732d38971aa741a3d4e2254265c1
|
[
"Apache-2.0"
] | null | null | null |
proxypool/db.py
|
kingkap/ProxyPool
|
0908ed91dde8732d38971aa741a3d4e2254265c1
|
[
"Apache-2.0"
] | null | null | null |
proxypool/db.py
|
kingkap/ProxyPool
|
0908ed91dde8732d38971aa741a3d4e2254265c1
|
[
"Apache-2.0"
] | null | null | null |
import redis
from proxypool.error import PoolEmptyError
from proxypool.setting import REDIS_HOST, REDIS_PORT, REDIS_PASSWORD, REDIS_KEY
from proxypool.setting import MAX_SCORE, MIN_SCORE, INITIAL_SCORE
from random import choice
import re
class RedisClient(object):
def __init__(self, host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD):
"""
        Initialize the Redis connection.
        :param host: Redis host
        :param port: Redis port
        :param password: Redis password
"""
self.db = redis.StrictRedis(host=host, port=port, password=password, decode_responses=True)
def add(self, proxy, score=INITIAL_SCORE):
"""
        Add a proxy and set its score (defaults to the initial score).
        :param proxy: proxy
        :param score: score
        :return: result of the add operation
"""
if not re.match('\d+\.\d+\.\d+\.\d+\:\d+', proxy):
            print('Proxy does not match the expected format', proxy, 'discarding')
return
if not self.db.zscore(REDIS_KEY, proxy):
if redis.VERSION[0] < 3:
return self.db.zadd(REDIS_KEY, score, proxy)
return self.db.zadd(REDIS_KEY, {proxy: score})
def random(self):
"""
        Get a random valid proxy: try the highest-scored proxies first; if none exist,
        fall back to the top-ranked ones; otherwise raise an exception.
        :return: a random proxy
"""
result = self.db.zrangebyscore(REDIS_KEY, MAX_SCORE, MAX_SCORE)
if len(result):
return choice(result)
else:
result = self.db.zrevrange(REDIS_KEY, 0, 100)
if len(result):
return choice(result)
else:
raise PoolEmptyError
def decrease(self, proxy):
"""
        Decrease a proxy's score by one; remove it if the score drops below the minimum.
        :param proxy: proxy
        :return: the updated proxy score
"""
score = self.db.zscore(REDIS_KEY, proxy)
if score and score > MIN_SCORE:
            print('Proxy', proxy, 'current score', score, 'decreased by 1')
if redis.VERSION[0] < 3:
return self.db.zincrby(REDIS_KEY, proxy, -1)
return self.db.zincrby(REDIS_KEY, -1, proxy)
else:
            print('Proxy', proxy, 'current score', score, 'removed')
return self.db.zrem(REDIS_KEY, proxy)
def exists(self, proxy):
"""
        Check whether a proxy already exists.
        :param proxy: proxy
        :return: whether the proxy exists
"""
return not self.db.zscore(REDIS_KEY, proxy) == None
def max(self, proxy):
"""
        Set a proxy's score to MAX_SCORE.
        :param proxy: proxy
        :return: result of the update
"""
        print('Proxy', proxy, 'is valid, setting score to', MAX_SCORE)
if redis.VERSION[0] < 3:
return self.db.zadd(REDIS_KEY, MAX_SCORE, proxy)
return self.db.zadd(REDIS_KEY, {proxy: MAX_SCORE})
def count(self):
"""
        Get the number of proxies.
        :return: count
"""
return self.db.zcard(REDIS_KEY)
def all(self):
"""
        Get all proxies.
        :return: list of all proxies
"""
return self.db.zrangebyscore(REDIS_KEY, MIN_SCORE, MAX_SCORE)
def batch(self, start, stop):
"""
        Get proxies in batches.
        :param start: start index
        :param stop: end index
        :return: list of proxies
"""
return self.db.zrevrange(REDIS_KEY, start, stop - 1)
if __name__ == '__main__':
conn = RedisClient()
result = conn.batch(680, 688)
print(result)
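    # --- Usage sketch (illustrative, not from the original source): the typical scoring
    # round-trip, assuming a reachable Redis instance configured in proxypool/setting.py.
    # conn.add('127.0.0.1:8080')        # new proxy starts at INITIAL_SCORE
    # conn.max('127.0.0.1:8080')        # verified proxy is promoted to MAX_SCORE
    # conn.decrease('127.0.0.1:8080')   # failed check: score - 1, removed below MIN_SCORE
    # print(conn.random(), conn.count())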
| 28.026786
| 99
| 0.549538
|
44ba3d71073424af58b696cad433f49ce93825f1
| 1,382
|
py
|
Python
|
konan_sdk/konan_service/routers.py
|
SynapseAnalytics/konan-sdk
|
b90f311d7e6e3f3a08ec8ef8ed03b78a33a586f9
|
[
"MIT"
] | null | null | null |
konan_sdk/konan_service/routers.py
|
SynapseAnalytics/konan-sdk
|
b90f311d7e6e3f3a08ec8ef8ed03b78a33a586f9
|
[
"MIT"
] | 6
|
2021-12-01T13:56:40.000Z
|
2022-03-28T10:06:05.000Z
|
konan_sdk/konan_service/routers.py
|
SynapseAnalytics/konan-sdk
|
b90f311d7e6e3f3a08ec8ef8ed03b78a33a586f9
|
[
"MIT"
] | null | null | null |
from typing import Callable, Type
from fastapi.types import DecoratedCallable
from fastapi_utils.inferring_router import InferringRouter
from konan_sdk.konan_service.serializers import (
KonanServiceBasePredictionResponse, KonanServiceBaseEvaluateResponse)
class KonanServiceRouter(InferringRouter):
def __init__(
self,
*,
predict_response_class: Type = KonanServiceBasePredictionResponse,
evaluate_response_class: Type = KonanServiceBaseEvaluateResponse,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._predict_response_class = predict_response_class
self._evaluate_response_class = evaluate_response_class
def healthz(
self,
**kwargs,
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.get(
'/healthz',
**kwargs
)
def predict(
self,
**kwargs,
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.post(
'/predict',
response_model=self._predict_response_class,
**kwargs,
)
def evaluate(
self,
**kwargs,
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.post(
'/evaluate',
response_model=self._evaluate_response_class,
**kwargs,
)
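# --- Usage sketch (illustrative, not from the original source): wiring the router into a
# FastAPI app. The handler below is a placeholder; a real service would return instances
# of the configured response classes from predict()/evaluate().
# from fastapi import FastAPI
#
# app = FastAPI()
# router = KonanServiceRouter()
#
# @router.healthz()
# def healthz() -> dict:
#     return {"status": "healthy"}
#
# app.include_router(router)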
| 27.64
| 74
| 0.638205
|
3de437bc7256891e0d844d2ce2f038708283b601
| 10,060
|
py
|
Python
|
GM2AUTOSAR_MM/Properties/negative/Himesis/HTrivialFalseECUplusSystem1CompleteLHS.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 3
|
2017-06-02T19:26:27.000Z
|
2021-06-14T04:25:45.000Z
|
GM2AUTOSAR_MM/Properties/negative/Himesis/HTrivialFalseECUplusSystem1CompleteLHS.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 8
|
2016-08-24T07:04:07.000Z
|
2017-05-26T16:22:47.000Z
|
GM2AUTOSAR_MM/Properties/negative/Himesis/HTrivialFalseECUplusSystem1CompleteLHS.py
|
levilucio/SyVOLT
|
7526ec794d21565e3efcc925a7b08ae8db27d46a
|
[
"MIT"
] | 1
|
2019-10-31T06:00:23.000Z
|
2019-10-31T06:00:23.000Z
|
from core.himesis import Himesis, HimesisPreConditionPatternLHS
class HTrivialFalseECUplusSystem1CompleteLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HTrivialFalseECUplusSystem1CompleteLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HTrivialFalseECUplusSystem1CompleteLHS, self).__init__(name='HTrivialFalseECUplusSystem1CompleteLHS', num_nodes=3, edges=[])
# Add the edges
self.add_edges([[1, 0], [2, 1]])
# Set the graph attributes
self["mm__"] = ['MT_pre__GM2AUTOSAR_MM', 'MoTifRule']
self["MT_constraint__"] = """if PreNode('1')['cardinality']=='+' and PreNode('2')['cardinality']=='1':
return True
return False
"""
self["name"] = """"""
self["GUID__"] = 6134117061306529222
# Set the node attributes
self.vs[0]["MT_subtypeMatching__"] = False
self.vs[0]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["mm__"] = """MT_pre__PhysicalNode"""
self.vs[0]["MT_subtypes__"] = []
self.vs[0]["MT_dirty__"] = False
self.vs[0]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["GUID__"] = 4629466633132705921
self.vs[1]["MT_subtypeMatching__"] = False
self.vs[1]["MT_label__"] = """3"""
self.vs[1]["mm__"] = """MT_pre__trace_link"""
self.vs[1]["MT_subtypes__"] = []
self.vs[1]["MT_dirty__"] = False
self.vs[1]["GUID__"] = 1625170057596913467
self.vs[2]["MT_subtypeMatching__"] = False
self.vs[2]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_label__"] = """2"""
self.vs[2]["mm__"] = """MT_pre__System"""
self.vs[2]["MT_subtypes__"] = []
self.vs[2]["MT_dirty__"] = False
self.vs[2]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["GUID__"] = 4867395514378831832
def eval_classtype1(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_name1(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_cardinality1(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_classtype2(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_name2(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_cardinality2(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
if PreNode('1')['cardinality']=='+' and PreNode('2')['cardinality']=='1':
return True
return False
| 47.45283
| 138
| 0.530318
|
0f38f930dfafa39cff3d01759e54cb748127ef98
| 1,201
|
py
|
Python
|
pypro/modulos/tests/test_aula_detalhes.py
|
taniodev/curso-django
|
aa5b0edd6ca55d2ea7f73220644d5c64a96c60df
|
[
"MIT"
] | null | null | null |
pypro/modulos/tests/test_aula_detalhes.py
|
taniodev/curso-django
|
aa5b0edd6ca55d2ea7f73220644d5c64a96c60df
|
[
"MIT"
] | 19
|
2019-11-26T20:38:55.000Z
|
2021-09-22T18:08:51.000Z
|
pypro/modulos/tests/test_aula_detalhes.py
|
taniodev/curso-django
|
aa5b0edd6ca55d2ea7f73220644d5c64a96c60df
|
[
"MIT"
] | null | null | null |
import pytest
from django.urls import reverse
from model_bakery import baker
from pypro.django_assertions import assert_contains
from pypro.modulos.models import Aula, Modulo
@pytest.fixture
def modulo(db):
return baker.make(Modulo)
@pytest.fixture
def aula(modulo):
return baker.make(Aula, modulo=modulo)
@pytest.fixture
def resp(client_com_usuario_logado, aula):
resp = client_com_usuario_logado.get(reverse('modulos:aula', kwargs={'slug': aula.slug}))
return resp
def test_titulo(resp, aula: Aula):
assert_contains(resp, aula.titulo)
def test_vimeo(resp, aula: Aula):
assert_contains(resp, f'src="https://player.vimeo.com/video/{ aula.vimeo_id }"')
def test_modulo_breadcrumb(resp, modulo: Modulo):
assert_contains(resp, f'<li class="breadcrumb-item"><a href="{modulo.get_absolute_url()}">{modulo.titulo}</a></li>')
@pytest.fixture
def resp_sem_usuario_logado(client, aula):
resp = client.get(reverse('modulos:aula', kwargs={'slug': aula.slug}))
return resp
def test_acesso_com_usuario_nao_logado(resp_sem_usuario_logado):
assert resp_sem_usuario_logado.status_code == 302
assert resp_sem_usuario_logado.url.startswith(reverse('login'))
| 26.108696
| 120
| 0.756037
|
cfac97f3dfe81a355f44aed249ece28a7c973c3e
| 6,245
|
py
|
Python
|
multinet/api/views/serializers.py
|
multinet-app/multinet-api
|
a658d787f0fb9ba415ed85a1e37c29953486287f
|
[
"Apache-2.0"
] | null | null | null |
multinet/api/views/serializers.py
|
multinet-app/multinet-api
|
a658d787f0fb9ba415ed85a1e37c29953486287f
|
[
"Apache-2.0"
] | 91
|
2021-03-15T19:00:15.000Z
|
2022-03-11T00:04:05.000Z
|
multinet/api/views/serializers.py
|
multinet-app/multinet-api
|
a658d787f0fb9ba415ed85a1e37c29953486287f
|
[
"Apache-2.0"
] | 1
|
2022-02-05T15:53:04.000Z
|
2022-02-05T15:53:04.000Z
|
from django.contrib.auth.models import User
from django.contrib.auth.validators import UnicodeUsernameValidator
from rest_framework import serializers
from multinet.api.models import AqlQuery, Network, Table, TableTypeAnnotation, Upload, Workspace
# The default ModelSerializer for User fails if the user already exists
class UserSerializer(serializers.Serializer):
username = serializers.CharField(validators=[UnicodeUsernameValidator()])
class UserDetailSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = [
'id',
'username',
'email',
'first_name',
'last_name',
'is_superuser',
]
# TODO: Add WorkspaceCreateSerializer that this inherits from,
# and specify arango_db_name on the extended serializer
class WorkspaceCreateSerializer(serializers.ModelSerializer):
class Meta:
model = Workspace
fields = [
'id',
'name',
'created',
'modified',
'public',
]
read_only_fields = ['created']
class WorkspaceRenameSerializer(serializers.ModelSerializer):
class Meta:
model = Workspace
fields = [
'name',
]
class WorkspaceSerializer(serializers.ModelSerializer):
class Meta:
model = Workspace
fields = WorkspaceCreateSerializer.Meta.fields + [
'arango_db_name',
]
read_only_fields = ['created']
class PermissionsCreateSerializer(serializers.Serializer):
public = serializers.BooleanField()
owner = UserSerializer()
maintainers = UserSerializer(many=True)
writers = UserSerializer(many=True)
readers = UserSerializer(many=True)
class PermissionsReturnSerializer(serializers.ModelSerializer):
owner = UserDetailSerializer()
maintainers = UserDetailSerializer(many=True)
writers = UserDetailSerializer(many=True)
readers = UserDetailSerializer(many=True)
class Meta:
model = Workspace
fields = WorkspaceCreateSerializer.Meta.fields + [
'owner',
'maintainers',
'writers',
'readers',
]
class SingleUserWorkspacePermissionSerializer(serializers.Serializer):
# Allow empty username since anonymous user is a reader for public workspaces
username = serializers.CharField(validators=[UnicodeUsernameValidator()], allow_blank=True)
workspace = serializers.CharField()
permission = serializers.IntegerField(allow_null=True)
permission_label = serializers.CharField(allow_null=True)
class AqlQuerySerializer(serializers.Serializer):
query = serializers.CharField()
class AqlQueryTaskSerializer(serializers.ModelSerializer):
class Meta:
model = AqlQuery
exclude = ['results']
workspace = WorkspaceSerializer()
# Specify user as a CharField to return username
user = serializers.CharField()
class AqlQueryResultsSerializer(serializers.ModelSerializer):
class Meta:
model = AqlQuery
fields = ['id', 'workspace', 'user', 'results']
workspace = serializers.CharField()
user = serializers.CharField()
class LimitOffsetSerializer(serializers.Serializer):
limit = serializers.IntegerField(required=False)
offset = serializers.IntegerField(required=False)
class PaginatedResultSerializer(serializers.Serializer):
count = serializers.IntegerField()
previous = serializers.URLField(allow_null=True)
next = serializers.URLField(allow_null=True)
results = serializers.ListField(child=serializers.JSONField())
class TableRowRetrieveSerializer(LimitOffsetSerializer):
filter = serializers.JSONField(required=False)
# The required fields for table creation
class TableCreateSerializer(serializers.ModelSerializer):
class Meta:
model = Table
fields = [
'name',
'edge',
]
read_only_fields = ['created']
# Used for full Table serialization / validation
class TableSerializer(TableCreateSerializer):
class Meta:
model = Table
fields = TableCreateSerializer.Meta.fields + [
'id',
'created',
'modified',
'workspace',
]
read_only_fields = ['created']
# Used for serializing Tables as responses
class TableReturnSerializer(TableSerializer):
workspace = WorkspaceSerializer()
class NetworkCreateSerializer(serializers.ModelSerializer):
class Meta:
model = Network
fields = ['name', 'edge_table']
edge_table = serializers.CharField()
class NetworkSerializer(serializers.ModelSerializer):
class Meta:
model = Network
fields = '__all__'
class NetworkReturnSerializer(serializers.ModelSerializer):
class Meta:
model = Network
fields = ['id', 'name', 'created', 'modified']
class NetworkReturnDetailSerializer(serializers.ModelSerializer):
class Meta:
model = Network
fields = [
'id',
'name',
'node_count',
'edge_count',
'created',
'modified',
'workspace',
]
workspace = WorkspaceSerializer()
class NetworkTablesSerializer(serializers.Serializer):
type = serializers.ChoiceField(choices=['node', 'edge', 'all'], default='all', required=False)
class UploadCreateSerializer(serializers.Serializer):
field_value = serializers.CharField()
class UploadReturnSerializer(serializers.ModelSerializer):
class Meta:
model = Upload
fields = '__all__'
workspace = WorkspaceSerializer()
# Specify blob as a CharField to coerce to object_key
blob = serializers.CharField()
# Specify user as a CharField to return username
user = serializers.CharField()
class CSVUploadCreateSerializer(UploadCreateSerializer):
edge = serializers.BooleanField()
table_name = serializers.CharField()
columns = serializers.DictField(
child=serializers.ChoiceField(choices=TableTypeAnnotation.Type.choices),
default=dict,
)
class D3JSONUploadCreateSerializer(UploadCreateSerializer):
network_name = serializers.CharField()
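# --- Usage sketch (illustrative, not from the original source): plain DRF validation with
# one of the simpler serializers above (no database access involved).
# serializer = NetworkTablesSerializer(data={"type": "node"})
# serializer.is_valid(raise_exception=True)
# serializer.validated_data  # -> {'type': 'node'}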
| 27.390351
| 98
| 0.681986
|
ace2f8f30b5925e1e78f513b4a44b3aecf71f7c4
| 232
|
py
|
Python
|
zoomus/components/__init__.py
|
appfluence/zoomus
|
a14e1f08700b9dad89f00b0d5c2a73a24d421c78
|
[
"Apache-2.0"
] | 2
|
2020-03-14T14:47:18.000Z
|
2020-04-06T23:20:54.000Z
|
zoomus/components/__init__.py
|
appfluence/zoomus
|
a14e1f08700b9dad89f00b0d5c2a73a24d421c78
|
[
"Apache-2.0"
] | null | null | null |
zoomus/components/__init__.py
|
appfluence/zoomus
|
a14e1f08700b9dad89f00b0d5c2a73a24d421c78
|
[
"Apache-2.0"
] | 1
|
2022-03-04T11:54:56.000Z
|
2022-03-04T11:54:56.000Z
|
"""Zoom.us REST API Python Client Components"""
from __future__ import absolute_import
from . import (
meeting,
recording,
report,
user,
webinar)
__author__ = "Patrick R. Schmid"
__email__ = "prschmid@act.md"
| 16.571429
| 47
| 0.685345
|
56b8309d77aa3f0e14658cb4753a0e198d03e4aa
| 4,921
|
py
|
Python
|
ioos_qc/argo.py
|
NOAA-PMEL/ioos_qc
|
bbe5a159275bd90f4b12b660776cf15557c10f0f
|
[
"Apache-2.0"
] | null | null | null |
ioos_qc/argo.py
|
NOAA-PMEL/ioos_qc
|
bbe5a159275bd90f4b12b660776cf15557c10f0f
|
[
"Apache-2.0"
] | null | null | null |
ioos_qc/argo.py
|
NOAA-PMEL/ioos_qc
|
bbe5a159275bd90f4b12b660776cf15557c10f0f
|
[
"Apache-2.0"
] | 1
|
2021-01-20T23:20:06.000Z
|
2021-01-20T23:20:06.000Z
|
#!/usr/bin/env python
# coding=utf-8
"""Tests based on the ARGO QC manual."""
import logging
import warnings
from numbers import Real as N
from typing import Sequence
import numpy as np
from ioos_qc.qartod import QartodFlags
from ioos_qc.utils import add_flag_metadata
from ioos_qc.utils import great_circle_distance
from ioos_qc.utils import mapdates
L = logging.getLogger(__name__) # noqa
@add_flag_metadata(standard_name='pressure_increasing_test_quality_flag',
long_name='Pressure Increasing Test Quality Flag')
def pressure_increasing_test(inp):
"""
Returns an array of flag values where each input is flagged with SUSPECT if
it does not monotonically increase
Ref: ARGO QC Manual: 8. Pressure increasing test
Args:
inp: Pressure values as a numeric numpy array or a list of numbers.
Returns:
A masked array of flag values equal in size to that of the input.
"""
delta = np.diff(inp)
flags = np.ones_like(inp, dtype='uint8') * QartodFlags.GOOD
# Correct for downcast vs upcast by flipping the sign if it's decreasing
sign = np.sign(np.mean(delta))
if sign < 0:
delta = sign * delta
flag_idx = np.where(delta <= 0)[0] + 1
flags[flag_idx] = QartodFlags.SUSPECT
return flags
@add_flag_metadata(standard_name='speed_test_quality_flag',
long_name='Speed Test Quality Flag')
def speed_test(lon: Sequence[N],
lat: Sequence[N],
tinp: Sequence[N],
suspect_threshold: float,
fail_threshold: float
) -> np.ma.core.MaskedArray:
"""Checks that the calculated speed between two points is within reasonable bounds.
This test calculates a speed between subsequent points by
* using latitude and longitude to calculate the distance between points
* calculating the time difference between those points
* checking if distance/time_diff exceeds the given threshold(s)
Missing and masked data is flagged as UNKNOWN.
If this test fails, it typically means that either a position or time is bad data,
or that a platform is mislabeled.
Ref: ARGO QC Manual: 5. Impossible speed test
Args:
lon: Longitudes as a numeric numpy array or a list of numbers.
lat: Latitudes as a numeric numpy array or a list of numbers.
tinp: Time data as a sequence of datetime objects compatible with pandas DatetimeIndex.
This includes numpy datetime64, python datetime objects and pandas Timestamp object.
ie. pd.DatetimeIndex([datetime.utcnow(), np.datetime64(), pd.Timestamp.now()]
If anything else is passed in the format is assumed to be seconds since the unix epoch.
suspect_threshold: A float value representing a speed, in meters per second.
Speeds exceeding this will be flagged as SUSPECT.
fail_threshold: A float value representing a speed, in meters per second.
Speeds exceeding this will be flagged as FAIL.
Returns:
A masked array of flag values equal in size to that of the input.
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
lat = np.ma.masked_invalid(np.array(lat).astype(np.floating))
lon = np.ma.masked_invalid(np.array(lon).astype(np.floating))
tinp = mapdates(tinp)
if lon.shape != lat.shape or lon.shape != tinp.shape:
raise ValueError(f'Lon ({lon.shape}) and lat ({lat.shape}) and tinp ({tinp.shape}) must be the same shape')
# Save original shape
original_shape = lon.shape
lon = lon.flatten()
lat = lat.flatten()
tinp = tinp.flatten()
# If no data, return
if lon.size == 0:
return np.ma.masked_array([])
# Start with everything as passing
flag_arr = QartodFlags.GOOD * np.ma.ones(lon.size, dtype='uint8')
# If either lon or lat are masked we just set the flag to MISSING
mloc = lon.mask & lat.mask
flag_arr[mloc] = QartodFlags.MISSING
# If only one data point, return
if lon.size < 2:
flag_arr[0] = QartodFlags.UNKNOWN
return flag_arr.reshape(original_shape)
# Calculate the great_distance between each point
dist = great_circle_distance(lat, lon)
# calculate speed in m/s
speed = np.ma.zeros(tinp.size, dtype='float')
speed[1:] = np.abs(dist[1:] / np.diff(tinp).astype('timedelta64[s]').astype(float))
with np.errstate(invalid='ignore'):
flag_arr[speed > suspect_threshold] = QartodFlags.SUSPECT
with np.errstate(invalid='ignore'):
flag_arr[speed > fail_threshold] = QartodFlags.FAIL
# first value is unknown, since we have no speed data for the first point
flag_arr[0] = QartodFlags.UNKNOWN
# If the value is masked set the flag to MISSING
flag_arr[dist.mask] = QartodFlags.MISSING
return flag_arr.reshape(original_shape)
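# --- usage sketch (illustrative only) -----------------------------------------
# Two fixes one minute apart and roughly 1.6 km apart imply ~27 m/s, so with
# the thresholds below the second point comes back SUSPECT; the first point is
# always UNKNOWN because there is no preceding fix to compute a speed from.
#
#   speed_test(
#       lon=[-71.00, -71.02],
#       lat=[42.0, 42.0],
#       tinp=np.array(['2020-01-01T00:00:00', '2020-01-01T00:01:00'],
#                     dtype='datetime64[ns]'),
#       suspect_threshold=10.0,  # m/s
#       fail_threshold=50.0,     # m/s
#   )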
| 36.183824
| 115
| 0.68482
|
0224a1191b3ff7aed461165e00b432a3698b96bd
| 279
|
py
|
Python
|
autocomplete/__init__.py
|
larryworm1127/autocomplete
|
d628afc369aa98d15570315bdb662bdbbf3df2e8
|
[
"MIT"
] | null | null | null |
autocomplete/__init__.py
|
larryworm1127/autocomplete
|
d628afc369aa98d15570315bdb662bdbbf3df2e8
|
[
"MIT"
] | null | null | null |
autocomplete/__init__.py
|
larryworm1127/autocomplete
|
d628afc369aa98d15570315bdb662bdbbf3df2e8
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from autocomplete.engine import (
LetterAutocompleteEngine,
SentenceAutocompleteEngine,
MelodyAutocompleteEngine
)
__all__ = [
'LetterAutocompleteEngine',
'SentenceAutocompleteEngine',
'MelodyAutocompleteEngine'
]
| 19.928571
| 38
| 0.781362
|
9a75c0021fce3dba8971277438fc421eae460de6
| 20,737
|
py
|
Python
|
enaml/qt/docking/q_dock_title_bar.py
|
pberkes/enaml
|
cbcbee929e3117dfe56c0b06dc2385acc832b0e8
|
[
"BSD-3-Clause-Clear"
] | 26
|
2016-04-01T18:49:31.000Z
|
2020-07-21T22:19:46.000Z
|
enaml/qt/docking/q_dock_title_bar.py
|
pberkes/enaml
|
cbcbee929e3117dfe56c0b06dc2385acc832b0e8
|
[
"BSD-3-Clause-Clear"
] | 29
|
2016-02-22T17:40:55.000Z
|
2018-08-21T18:18:36.000Z
|
enaml/qt/docking/q_dock_title_bar.py
|
pberkes/enaml
|
cbcbee929e3117dfe56c0b06dc2385acc832b0e8
|
[
"BSD-3-Clause-Clear"
] | 4
|
2016-08-29T13:07:19.000Z
|
2018-11-04T01:31:46.000Z
|
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from enaml.qt.QtCore import Qt, QSize, QPoint, QMargins, Signal
from enaml.qt.QtGui import QWidget, QFrame, QLineEdit, QHBoxLayout, QSizePolicy
from .q_bitmap_button import QBitmapButton, QCheckedBitmapButton
from .q_icon_widget import QIconWidget
from .q_text_label import QTextLabel
from .xbms import (
CLOSE_BUTTON, MAXIMIZE_BUTTON, RESTORE_BUTTON, LINKED_BUTTON,
UNLINKED_BUTTON, PIN_BUTTON, UNPIN_BUTTON
)
class IDockTitleBar(QWidget):
""" An interface class for defining a title bar.
"""
#: A signal emitted when the maximize button is clicked.
maximizeButtonClicked = Signal(bool)
#: A signal emitted when the restore button is clicked.
restoreButtonClicked = Signal(bool)
#: A signal emitted when the close button is clicked.
closeButtonClicked = Signal(bool)
#: A signal emitted when the link button is toggled.
linkButtonToggled = Signal(bool)
#: A signal emitted when the pin button is toggled.
pinButtonToggled = Signal(bool)
#: A signal emitted when the title is edited by the user.
titleEdited = Signal(unicode)
#: A signal emitted when the title bar is left double clicked.
leftDoubleClicked = Signal(QPoint)
#: A signal emitted when the title bar is right clicked.
rightClicked = Signal(QPoint)
#: Do not show any buttons in the title bar.
NoButtons = 0x0
#: Show the maximize button in the title bar.
MaximizeButton = 0x1
#: Show the restore button in the title bar.
RestoreButton = 0x2
#: Show the close button in the title bar.
CloseButton = 0x4
#: Show the link button in the title bar.
LinkButton = 0x8
#: Show the pin button in the title bar.
PinButton = 0x10
def buttons(self):
""" Get the buttons to show in the title bar.
Returns
-------
result : int
An or'd combination of the buttons to show.
"""
raise NotImplementedError
def setButtons(self, buttons):
""" Set the buttons to show in the title bar.
Parameters
----------
buttons : int
An or'd combination of the buttons to show.
"""
raise NotImplementedError
def title(self):
""" Get the title string of the title bar.
Returns
-------
result : unicode
The unicode title string for the title bar.
"""
raise NotImplementedError
def setTitle(self, title):
""" Set the title string of the title bar.
Parameters
----------
title : unicode
The unicode string to use for the title bar.
"""
raise NotImplementedError
def label(self):
""" Get the label for the title bar.
Returns
-------
result : QTextLabel
The label for the title bar.
"""
raise NotImplementedError
def icon(self):
""" Get the icon for the title bar.
Returns
-------
result : QIcon
The icon set for the title bar.
"""
raise NotImplementedError
def setIcon(self, icon):
""" Set the icon for the title bar.
Parameters
----------
icon : QIcon
The icon to use for the title bar.
"""
raise NotImplementedError
def iconSize(self):
""" Get the icon size for the title bar.
Returns
-------
result : QSize
The size to use for the icons in the title bar.
"""
raise NotImplementedError
def setIconSize(self, size):
""" Set the icon size for the title bar.
Parameters
----------
size : QSize
The icon size to use for the title bar. Icons smaller than
this size will not be scaled up.
"""
raise NotImplementedError
def isLinked(self):
""" Get whether the link button is checked.
Returns
-------
result : bool
True if the link button is checked, False otherwise.
"""
raise NotImplementedError
def setLinked(self, linked):
""" Set whether or not the link button is checked.
Parameters
----------
linked : bool
True if the link button should be checked, False otherwise.
"""
raise NotImplementedError
def isPinned(self):
""" Get whether the pin button is checked.
Returns
-------
result : bool
True if the pin button is checked, False otherwise.
"""
raise NotImplementedError
def setPinned(self, pinned, quiet=False):
""" Set whether or not the pin button is checked.
Parameters
----------
pinned : bool
True if the pin button should be checked, False otherwise.
quiet : bool, optional
True if the state should be set without emitting the toggled
signal. The default is False.
"""
raise NotImplementedError
def isEditable(self):
""" Get whether the title is user editable.
Returns
-------
result : bool
True if the title is user editable, False otherwise.
"""
raise NotImplementedError
def setEditable(self, editable):
""" Set whether or not the title is user editable.
Parameters
----------
editable : bool
True if the title is user editable, False otherwise.
"""
raise NotImplementedError
def isForceHidden(self):
""" Get whether or not the title bar is force hidden.
Returns
-------
result : bool
Whether or not the title bar is force hidden.
"""
raise NotImplementedError
def setForceHidden(self, hidden):
""" Set the force hidden state of the title bar.
Parameters
----------
hidden : bool
True if the title bar should be hidden, False otherwise.
"""
raise NotImplementedError
class QDockTitleBar(QFrame, IDockTitleBar):
""" A concrete implementation of IDockTitleBar.
This class serves as the default title bar for a QDockItem.
"""
#: A signal emitted when the maximize button is clicked.
maximizeButtonClicked = Signal(bool)
#: A signal emitted when the restore button is clicked.
restoreButtonClicked = Signal(bool)
#: A signal emitted when the close button is clicked.
closeButtonClicked = Signal(bool)
#: A signal emitted when the link button is toggled.
linkButtonToggled = Signal(bool)
#: A signal emitted when the pin button is toggled.
pinButtonToggled = Signal(bool)
#: A signal emitted when the title is edited by the user.
titleEdited = Signal(unicode)
#: A signal emitted when the empty area is left double clicked.
leftDoubleClicked = Signal(QPoint)
#: A signal emitted when the empty area is right clicked.
rightClicked = Signal(QPoint)
def __init__(self, parent=None):
""" Initialize a QDockTitleBar.
Parameters
----------
parent : QWidget or None
The parent of the title bar.
"""
super(QDockTitleBar, self).__init__(parent)
self._buttons = self.CloseButton | self.MaximizeButton | self.PinButton
self._is_editable = False
self._force_hidden = False
self._last_visible = True
self._line_edit = None
title_icon = self._title_icon = QIconWidget(self)
title_icon.setVisible(False)
title_label = self._title_label = QTextLabel(self)
spacer = self._spacer = QWidget(self)
policy = spacer.sizePolicy()
policy.setHorizontalPolicy(QSizePolicy.Expanding)
spacer.setSizePolicy(policy)
btn_size = QSize(14, 13)
max_button = self._max_button = QBitmapButton(self)
max_button.setObjectName('docktitlebar-maximize-button')
max_button.setBitmap(MAXIMIZE_BUTTON.toBitmap())
max_button.setIconSize(btn_size)
max_button.setVisible(self._buttons & self.MaximizeButton)
max_button.setToolTip('Maximize')
restore_button = self._restore_button = QBitmapButton(self)
restore_button.setObjectName('docktitlebar-restore-button')
restore_button.setBitmap(RESTORE_BUTTON.toBitmap())
restore_button.setIconSize(btn_size)
restore_button.setVisible(self._buttons & self.RestoreButton)
restore_button.setToolTip('Restore Down')
close_button = self._close_button = QBitmapButton(self)
close_button.setObjectName('docktitlebar-close-button')
close_button.setBitmap(CLOSE_BUTTON.toBitmap())
close_button.setIconSize(btn_size)
close_button.setVisible(self._buttons & self.CloseButton)
close_button.setToolTip('Close')
link_button = self._link_button = QCheckedBitmapButton(self)
link_button.setObjectName('docktitlebar-link-button')
link_button.setBitmap(UNLINKED_BUTTON.toBitmap())
link_button.setCheckedBitmap(LINKED_BUTTON.toBitmap())
link_button.setIconSize(btn_size)
link_button.setVisible(self._buttons & self.LinkButton)
link_button.setToolTip('Link Window')
link_button.setCheckedToolTip('Unlink Window')
pin_button = self._pin_button = QCheckedBitmapButton(self)
pin_button.setObjectName('docktitlebar-pin-button')
pin_button.setBitmap(PIN_BUTTON.toBitmap())
pin_button.setCheckedBitmap(UNPIN_BUTTON.toBitmap())
pin_button.setIconSize(QSize(13, 13))
pin_button.setVisible(self._buttons & self.PinButton)
pin_button.setToolTip('Pin Window')
pin_button.setCheckedToolTip('Unpin Window')
layout = QHBoxLayout()
layout.setContentsMargins(QMargins(5, 2, 5, 2))
layout.setSpacing(1)
layout.addWidget(title_icon)
layout.addSpacing(0)
layout.addWidget(title_label)
layout.addWidget(spacer)
layout.addSpacing(4)
layout.addWidget(pin_button)
layout.addWidget(link_button)
layout.addWidget(max_button)
layout.addWidget(restore_button)
layout.addWidget(close_button)
self.setLayout(layout)
max_button.clicked.connect(self.maximizeButtonClicked)
restore_button.clicked.connect(self.restoreButtonClicked)
close_button.clicked.connect(self.closeButtonClicked)
link_button.toggled.connect(self.linkButtonToggled)
pin_button.toggled.connect(self.pinButtonToggled)
#--------------------------------------------------------------------------
# Event Handlers
#--------------------------------------------------------------------------
def mouseDoubleClickEvent(self, event):
""" Handle the mouse double click event for the title bar.
"""
event.ignore()
if event.button() == Qt.LeftButton:
pos = event.pos()
is_editable = self._is_editable
if self._adjustedLabelGeometry().contains(pos) and is_editable:
self._showTitleLineEdit()
event.accept()
return
if self._clickableGeometry().contains(pos):
self.leftDoubleClicked.emit(event.globalPos())
event.accept()
return
def mousePressEvent(self, event):
""" Handle the mouse press event for the title bar.
"""
event.ignore()
if event.button() == Qt.RightButton:
if self._clickableGeometry().contains(event.pos()):
self.rightClicked.emit(event.globalPos())
event.accept()
return
#--------------------------------------------------------------------------
# Overrides
#--------------------------------------------------------------------------
def setVisible(self, visible):
""" An overridden virtual visibility setter.
This handler enforces the force-hidden setting.
"""
self._last_visible = visible
if visible and self._force_hidden:
return
super(QDockTitleBar, self).setVisible(visible)
#--------------------------------------------------------------------------
# Private API
#--------------------------------------------------------------------------
def _adjustedLabelGeometry(self):
""" Get the adjust label geometry.
Returns
-------
result : QRect
A rectangle representing the label geometry which has been
adjusted for potentially empty text. This rect can be used
as the hit-testing rect for the label text.
"""
label = self._title_label
label_geo = label.geometry()
if not label.text():
label_geo = label_geo.adjusted(0, 0, 10, 0)
return label_geo
def _clickableGeometry(self):
""" Get the geometry rect which represents clickable area.
Returns
-------
result : QRect
A rectangle adjusted for the clickable geometry.
"""
rect = self.rect().adjusted(5, 2, -5, -2)
rect.setRight(self._spacer.geometry().right())
return rect
def _showTitleLineEdit(self):
""" Setup the line edit widget for editing the title.
"""
old_line_edit = self._line_edit
if old_line_edit is not None:
old_line_edit.hide()
old_line_edit.deleteLater()
line_edit = self._line_edit = QLineEdit(self)
line_edit.setFrame(False)
line_edit.setText(self._title_label.text())
line_edit.selectAll()
h = self._title_label.height()
line_edit.setMinimumHeight(h)
line_edit.setMaximumHeight(h)
line_edit.editingFinished.connect(self._onEditingFinished)
layout = self.layout()
idx = layout.indexOf(self._spacer)
layout.insertWidget(idx, line_edit)
self._spacer.hide()
self._title_label.hide()
line_edit.show()
line_edit.setFocus(Qt.MouseFocusReason)
def _onEditingFinished(self):
""" Handle the 'editingFinished' signal for title line edit.
"""
line_edit = self._line_edit
if line_edit is not None:
text = line_edit.text()
line_edit.hide()
line_edit.deleteLater()
self._line_edit = None
changed = self._title_label.text() != text
if changed:
self._title_label.setText(text)
self._title_label.show()
self._spacer.show()
if changed:
self.titleEdited.emit(text)
#--------------------------------------------------------------------------
# IDockItemTitleBar API
#--------------------------------------------------------------------------
def buttons(self):
""" Get the buttons to show in the title bar.
Returns
-------
result : int
An or'd combination of the buttons to show.
"""
return self._buttons
def setButtons(self, buttons):
""" Set the buttons to show in the title bar.
Parameters
----------
buttons : int
An or'd combination of the buttons to show.
"""
self._buttons = buttons
self._max_button.setVisible(buttons & self.MaximizeButton)
self._restore_button.setVisible(buttons & self.RestoreButton)
self._close_button.setVisible(buttons & self.CloseButton)
self._link_button.setVisible(buttons & self.LinkButton)
self._pin_button.setVisible(buttons & self.PinButton)
def title(self):
""" Get the title string of the title bar.
Returns
-------
result : unicode
The unicode title string for the title bar.
"""
return self._title_label.text()
def setTitle(self, title):
""" Set the title string of the title bar.
Parameters
----------
title : unicode
The unicode string to use for the title bar.
"""
self._title_label.setText(title)
def label(self):
""" Get the label which holds the title string.
Returns
-------
result : QTextLabel
The label widget which holds the title string.
"""
return self._title_label
def icon(self):
""" Get the icon for the title bar.
Returns
-------
result : QIcon
The icon set for the title bar.
"""
return self._title_icon.icon()
def setIcon(self, icon):
""" Set the icon for the title bar.
Parameters
----------
icon : QIcon
The icon to use for the title bar.
"""
visible, spacing = (False, 0) if icon.isNull() else (True, 4)
title_icon = self._title_icon
title_icon.setIcon(icon)
title_icon.setVisible(visible)
layout = self.layout()
layout.takeAt(1)
layout.insertSpacing(1, spacing)
def iconSize(self):
""" Get the icon size for the title bar.
Returns
-------
result : QSize
The size to use for the icons in the title bar.
"""
return self._title_icon.iconSize()
def setIconSize(self, size):
""" Set the icon size for the title bar.
Parameters
----------
size : QSize
The icon size to use for the title bar. Icons smaller than
this size will not be scaled up.
"""
self._title_icon.setIconSize(size)
def isLinked(self):
""" Get whether the link button is checked.
Returns
-------
result : bool
True if the link button is checked, False otherwise.
"""
return self._link_button.isChecked()
def setLinked(self, linked):
""" Set whether or not the link button is checked.
Parameters
----------
linked : bool
True if the link button should be checked, False otherwise.
"""
self._link_button.setChecked(linked)
def isPinned(self):
""" Get whether the pin button is checked.
Returns
-------
result : bool
True if the pin button is checked, False otherwise.
"""
return self._pin_button.isChecked()
def setPinned(self, pinned, quiet=False):
""" Set whether or not the pin button is checked.
Parameters
----------
pinned : bool
True if the pin button should be checked, False otherwise.
quiet : bool, optional
True if the state should be set without emitting the toggled
signal. The default is False.
"""
old = self._pin_button.blockSignals(quiet)
self._pin_button.setChecked(pinned)
self._pin_button.blockSignals(old)
def isEditable(self):
""" Get whether the title is user editable.
Returns
-------
result : bool
True if the title is user editable, False otherwise.
"""
return self._is_editable
def setEditable(self, editable):
""" Set whether or not the title is user editable.
Parameters
----------
editable : bool
True if the title is user editable, False otherwise.
"""
self._is_editable = editable
def isForceHidden(self):
""" Get whether or not the title bar is force hidden.
Returns
-------
result : bool
Whether or not the title bar is always hidden.
"""
return self._force_hidden
def setForceHidden(self, hidden):
""" Set the force hidden state of the title bar.
Parameters
----------
hidden : bool
True if the title bar should be hidden, False otherwise.
"""
self._force_hidden = hidden
if not hidden and self._last_visible:
super(QDockTitleBar, self).setVisible(True)
elif hidden:
super(QDockTitleBar, self).setVisible(False)
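# -----------------------------------------------------------------------------
# Usage sketch (illustrative only): how an embedding dock item might configure
# this title bar. A QApplication is assumed to exist already; nothing below is
# part of the public enaml API.
def _example_title_bar(parent=None):
    bar = QDockTitleBar(parent)
    bar.setTitle(u'Example Dock Item')
    bar.setEditable(True)
    # Show only the close and maximize buttons.
    bar.setButtons(QDockTitleBar.CloseButton | QDockTitleBar.MaximizeButton)
    return bar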
| 29.248237
| 79
| 0.579303
|
4618db6b0beaa3905a677303351dae3e2785a1e7
| 8,537
|
py
|
Python
|
locust/user/users.py
|
odidev/locust
|
34277aacd6b634b77cd3a12c786cda42da655c9d
|
[
"MIT"
] | null | null | null |
locust/user/users.py
|
odidev/locust
|
34277aacd6b634b77cd3a12c786cda42da655c9d
|
[
"MIT"
] | 11
|
2021-11-10T22:15:18.000Z
|
2022-03-28T22:20:23.000Z
|
locust/user/users.py
|
odidev/locust
|
34277aacd6b634b77cd3a12c786cda42da655c9d
|
[
"MIT"
] | null | null | null |
from locust.user.wait_time import constant
from typing import Any, Callable, Dict, List, TypeVar, Union
from typing_extensions import final
from gevent import GreenletExit, greenlet
from gevent.pool import Group
from locust.clients import HttpSession
from locust.exception import LocustError, StopUser
from locust.util import deprecation
from .task import (
TaskSet,
DefaultTaskSet,
get_tasks_from_base_classes,
LOCUST_STATE_RUNNING,
LOCUST_STATE_WAITING,
LOCUST_STATE_STOPPING,
)
class UserMeta(type):
"""
Meta class for the main User class. It's used to allow User classes to specify task execution
ratio using a {task:int} dict, or a [(task0,int), ..., (taskN,int)] list.
"""
def __new__(mcs, classname, bases, class_dict):
# gather any tasks that are declared on the class (or its bases)
tasks = get_tasks_from_base_classes(bases, class_dict)
class_dict["tasks"] = tasks
if not class_dict.get("abstract"):
# Not a base class
class_dict["abstract"] = False
deprecation.check_for_deprecated_task_set_attribute(class_dict)
return type.__new__(mcs, classname, bases, class_dict)
class User(object, metaclass=UserMeta):
"""
Represents a "user" which is to be spawned and attack the system that is to be load tested.
The behaviour of this user is defined by its tasks. Tasks can be declared either directly on the
class by using the :py:func:`@task decorator <locust.task>` on methods, or by setting
the :py:attr:`tasks attribute <locust.User.tasks>`.
This class should usually be subclassed by a class that defines some kind of client. For
example when load testing an HTTP system, you probably want to use the
:py:class:`HttpUser <locust.HttpUser>` class.
"""
host: str = None
"""Base hostname to swarm. i.e: http://127.0.0.1:1234"""
min_wait = None
"""Deprecated: Use wait_time instead. Minimum waiting time between the execution of locust tasks"""
max_wait = None
"""Deprecated: Use wait_time instead. Maximum waiting time between the execution of locust tasks"""
wait_time = constant(0)
"""
Method that returns the time (in seconds) between the execution of locust tasks.
Can be overridden for individual TaskSets.
Example::
from locust import User, between
class MyUser(User):
wait_time = between(3, 25)
"""
wait_function = None
"""
.. warning::
DEPRECATED: Use wait_time instead. Note that the new wait_time method should return seconds and not milliseconds.
Method that returns the time between the execution of locust tasks in milliseconds
"""
tasks: List[Union[TaskSet, Callable]] = []
"""
Collection of python callables and/or TaskSet classes that the Locust user(s) will run.
If tasks is a list, the task to be performed will be picked randomly.
If tasks is a *(callable,int)* list of two-tuples, or a {callable:int} dict,
the task to be performed will be picked randomly, but each task will be weighted
according to its corresponding int value. So in the following case, *ThreadPage* will
be fifteen times more likely to be picked than *write_post*::
class ForumPage(TaskSet):
tasks = {ThreadPage:15, write_post:1}
"""
weight = 1
"""Probability of user class being chosen. The higher the weight, the greater the chance of it being chosen."""
abstract = True
"""If abstract is True, the class is meant to be subclassed, and locust will not spawn users of this class during a test."""
environment = None
"""A reference to the :py:class:`Environment <locust.env.Environment>` in which this user is running"""
client = None
_state = None
_greenlet: greenlet.Greenlet = None
_group: Group
_taskset_instance = None
def __init__(self, environment):
super().__init__()
self.environment = environment
def on_start(self):
"""
Called when a User starts running.
"""
pass
def on_stop(self):
"""
Called when a User stops running (is killed)
"""
pass
@final
def run(self):
self._state = LOCUST_STATE_RUNNING
self._taskset_instance = DefaultTaskSet(self)
try:
# run the TaskSet on_start method, if it has one
self.on_start()
self._taskset_instance.run()
except (GreenletExit, StopUser):
# run the on_stop method, if it has one
self.on_stop()
def wait(self):
"""
Make the running user sleep for a duration defined by the User.wait_time
function.
The user can also be killed gracefully while it's sleeping, so calling this
method within a task makes it possible for a user to be killed mid-task even if you've
set a stop_timeout. If this behaviour is not desired, you should make the user wait using
gevent.sleep() instead.
"""
self._taskset_instance.wait()
def start(self, group: Group):
"""
Start a greenlet that runs this User instance.
:param group: Group instance where the greenlet will be spawned.
:type group: gevent.pool.Group
:returns: The spawned greenlet.
"""
def run_user(user):
"""
Main function for User greenlet. It's important that this function takes the user
instance as an argument, since we use greenlet_instance.args[0] to retrieve a reference to the
User instance.
"""
user.run()
self._greenlet = group.spawn(run_user, self)
self._group = group
return self._greenlet
def stop(self, force=False):
"""
Stop the user greenlet.
:param force: If False (the default) the stopping is done gracefully by setting the state to LOCUST_STATE_STOPPING
which will make the User instance stop once any currently running task is complete and on_stop
methods are called. If force is True the greenlet will be killed immediately.
:returns: True if the greenlet was killed immediately, otherwise False
"""
if force or self._state == LOCUST_STATE_WAITING:
self._group.killone(self._greenlet)
return True
elif self._state == LOCUST_STATE_RUNNING:
self._state = LOCUST_STATE_STOPPING
return False
@property
def group(self):
return self._group
@property
def greenlet(self):
return self._greenlet
def context(self) -> Dict:
"""
Adds the returned value (a dict) to the context for :ref:`request event <request_context>`
"""
return {}
@classmethod
def fullname(cls) -> str:
"""Fully qualified name of the user class, e.g. my_package.my_module.MyUserClass"""
return ".".join(filter(lambda x: x != "<locals>", (cls.__module__ + "." + cls.__qualname__).split(".")))
class HttpUser(User):
"""
Represents an HTTP "user" which is to be spawned and attack the system that is to be load tested.
The behaviour of this user is defined by its tasks. Tasks can be declared either directly on the
class by using the :py:func:`@task decorator <locust.task>` on methods, or by setting
the :py:attr:`tasks attribute <locust.User.tasks>`.
This class creates a *client* attribute on instantiation which is an HTTP client with support
for keeping a user session between requests.
"""
abstract = True
"""If abstract is True, the class is meant to be subclassed, and users will not choose this locust during a test"""
client: HttpSession = None
"""
Instance of HttpSession that is created upon instantiation of Locust.
The client supports cookies, and therefore keeps the session between HTTP requests.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.host is None:
raise LocustError(
"You must specify the base host. Either in the host attribute in the User class, or on the command line using the --host option."
)
session = HttpSession(
base_url=self.host,
request_event=self.environment.events.request,
user=self,
)
session.trust_env = False
self.client = session
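# -----------------------------------------------------------------------------
# Usage sketch (illustrative only, not shipped with locust): a minimal concrete
# user built from the classes above. The host URL and the "/" path are
# assumptions; real tests normally declare tasks with the @task decorator.
def _example_browse(user):
    # A plain callable task: locust passes the running user instance in.
    user.client.get("/")


class ExampleHttpUser(HttpUser):
    host = "http://localhost:8080"  # assumed endpoint
    wait_time = constant(1)         # one second between task executions
    tasks = [_example_browse]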
| 34.844898
| 145
| 0.655734
|
2d35d376295307c370aec37ad49a096a3f91b105
| 2,732
|
py
|
Python
|
src/OTLMOW/OTLModel/Datatypes/KlCabineStandaardtype.py
|
davidvlaminck/OTLClassPython
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | 2
|
2022-02-01T08:58:11.000Z
|
2022-02-08T13:35:17.000Z
|
src/OTLMOW/OTLModel/Datatypes/KlCabineStandaardtype.py
|
davidvlaminck/OTLMOW
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | null | null | null |
src/OTLMOW/OTLModel/Datatypes/KlCabineStandaardtype.py
|
davidvlaminck/OTLMOW
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlCabineStandaardtype(KeuzelijstField):
"""Veel voorkomende types van cabines."""
naam = 'KlCabineStandaardtype'
label = 'Cabine standaardtype'
objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlCabineStandaardtype'
definition = 'Veel voorkomende types van cabines.'
codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlCabineStandaardtype'
options = {
'aluminium-betreedbaar': KeuzelijstWaarde(invulwaarde='aluminium-betreedbaar',
label='aluminium betreedbaar',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlCabineStandaardtype/aluminium-betreedbaar'),
'aluminium-niet-betreedbaar': KeuzelijstWaarde(invulwaarde='aluminium-niet-betreedbaar',
label='aluminium niet betreedbaar',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlCabineStandaardtype/aluminium-niet-betreedbaar'),
'beton-betreedbaar': KeuzelijstWaarde(invulwaarde='beton-betreedbaar',
label='beton betreedbaar',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlCabineStandaardtype/beton-betreedbaar'),
'beton-niet-betreedbaar-(compactstation)': KeuzelijstWaarde(invulwaarde='beton-niet-betreedbaar-(compactstation)',
label='beton niet betreedbaar (compactstation)',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlCabineStandaardtype/beton-niet-betreedbaar-(compactstation)'),
'gemetst-betreedbaar': KeuzelijstWaarde(invulwaarde='gemetst-betreedbaar',
label='gemetst betreedbaar',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlCabineStandaardtype/gemetst-betreedbaar'),
'lokaal-in-een-gebouw': KeuzelijstWaarde(invulwaarde='lokaal-in-een-gebouw',
label='lokaal in een gebouw',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlCabineStandaardtype/lokaal-in-een-gebouw')
}
| 78.057143
| 196
| 0.610908
|
8ef0aa88df6a2365f0efe19db6006bd842ade7f3
| 2,567
|
py
|
Python
|
tempest/services/compute/xml/services_client.py
|
BeenzSyed/tempest
|
7a64ee1216d844f6b99928b53f5c665b84cb8719
|
[
"Apache-2.0"
] | null | null | null |
tempest/services/compute/xml/services_client.py
|
BeenzSyed/tempest
|
7a64ee1216d844f6b99928b53f5c665b84cb8719
|
[
"Apache-2.0"
] | null | null | null |
tempest/services/compute/xml/services_client.py
|
BeenzSyed/tempest
|
7a64ee1216d844f6b99928b53f5c665b84cb8719
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 NEC Corporation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from lxml import etree
from tempest.common.rest_client import RestClientXML
from tempest.services.compute.xml.common import Document
from tempest.services.compute.xml.common import Element
from tempest.services.compute.xml.common import xml_to_json
class ServicesClientXML(RestClientXML):
def __init__(self, config, username, password, auth_url, tenant_name=None):
super(ServicesClientXML, self).__init__(config, username, password,
auth_url, tenant_name)
self.service = self.config.compute.catalog_type
def list_services(self, params=None):
url = 'os-services'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url, self.headers)
node = etree.fromstring(body)
body = [xml_to_json(x) for x in node.getchildren()]
return resp, body
def enable_service(self, host_name, binary):
"""
Enable service on a host
host_name: Name of host
binary: Service binary
"""
post_body = Element("service")
post_body.add_attr('binary', binary)
post_body.add_attr('host', host_name)
resp, body = self.put('os-services/enable', str(Document(post_body)),
self.headers)
body = xml_to_json(etree.fromstring(body))
return resp, body
def disable_service(self, host_name, binary):
"""
Disable service on a host
host_name: Name of host
binary: Service binary
"""
post_body = Element("service")
post_body.add_attr('binary', binary)
post_body.add_attr('host', host_name)
resp, body = self.put('os-services/disable', str(Document(post_body)),
self.headers)
body = xml_to_json(etree.fromstring(body))
return resp, body
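# -----------------------------------------------------------------------------
# Illustrative only: the request body built by enable_service()/disable_service()
# is a single <service> element, roughly of the form (binary/host values are
# examples, not defaults):
#
#   <service binary="nova-compute" host="compute-01"/>
#
# and is PUT to os-services/enable or os-services/disable respectively.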
| 35.652778
| 79
| 0.650954
|
fc8e7a7149542bee67819a7f82e753381d8b366e
| 9,061
|
py
|
Python
|
ryu/services/protocols/bgp/application.py
|
shiyanlou/ryu
|
cdd7084b941160f3b948d9c98fcc549784444b29
|
[
"Apache-2.0"
] | 1
|
2019-09-11T11:56:19.000Z
|
2019-09-11T11:56:19.000Z
|
tools/dockerize/webportal/usr/lib/python2.7/site-packages/ryu/services/protocols/bgp/application.py
|
foruy/openflow-multiopenstack
|
74140b041ac25ed83898ff3998e8dcbed35572bb
|
[
"Apache-2.0"
] | null | null | null |
tools/dockerize/webportal/usr/lib/python2.7/site-packages/ryu/services/protocols/bgp/application.py
|
foruy/openflow-multiopenstack
|
74140b041ac25ed83898ff3998e8dcbed35572bb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines base classes to create a BGP application.
"""
import imp
import logging
import traceback
from os import path
from oslo.config import cfg
from ryu.lib import hub
from ryu.base.app_manager import RyuApp
from ryu.services.protocols.bgp.api.base import call
from ryu.services.protocols.bgp.base import add_bgp_error_metadata
from ryu.services.protocols.bgp.base import BGPSException
from ryu.services.protocols.bgp.base import BIN_ERROR
from ryu.services.protocols.bgp.core_manager import CORE_MANAGER
from ryu.services.protocols.bgp import net_ctrl
from ryu.services.protocols.bgp.rtconf.base import RuntimeConfigError
from ryu.services.protocols.bgp.rtconf.common import BGP_SERVER_PORT
from ryu.services.protocols.bgp.rtconf.common import DEFAULT_BGP_SERVER_PORT
from ryu.services.protocols.bgp.rtconf.common import \
DEFAULT_REFRESH_MAX_EOR_TIME
from ryu.services.protocols.bgp.rtconf.common import \
DEFAULT_REFRESH_STALEPATH_TIME
from ryu.services.protocols.bgp.rtconf.common import DEFAULT_LABEL_RANGE
from ryu.services.protocols.bgp.rtconf.common import LABEL_RANGE
from ryu.services.protocols.bgp.rtconf.common import LOCAL_AS
from ryu.services.protocols.bgp.rtconf.common import REFRESH_MAX_EOR_TIME
from ryu.services.protocols.bgp.rtconf.common import REFRESH_STALEPATH_TIME
from ryu.services.protocols.bgp.rtconf.common import ROUTER_ID
from ryu.services.protocols.bgp.rtconf import neighbors
from ryu.services.protocols.bgp.rtconf import vrfs
from ryu.services.protocols.bgp.utils.dictconfig import dictConfig
from ryu.services.protocols.bgp.utils.validation import is_valid_ipv4
from ryu.services.protocols.bgp.operator import ssh
LOG = logging.getLogger('bgpspeaker.application')
CONF = cfg.CONF
CONF.register_opts([
cfg.IntOpt('bind-port', default=50002, help='rpc-port'),
cfg.StrOpt('bind-ip', default='0.0.0.0', help='rpc-bind-ip'),
cfg.StrOpt('bgp-config-file', default=None,
help='bgp-config-file')
])
@add_bgp_error_metadata(code=BIN_ERROR,
sub_code=1,
def_desc='Unknown bootstrap exception.')
class ApplicationException(BGPSException):
"""Specific Base exception related to `BSPSpeaker`."""
pass
class RyuBGPSpeaker(RyuApp):
def __init__(self, *args, **kwargs):
self.bind_ip = RyuBGPSpeaker.validate_rpc_ip(CONF.bind_ip)
self.bind_port = RyuBGPSpeaker.validate_rpc_port(CONF.bind_port)
self.config_file = CONF.bgp_config_file
super(RyuBGPSpeaker, self).__init__(*args, **kwargs)
def start(self):
# Only two main green threads are required for APGW bgp-agent.
# One for NetworkController, another for BGPS core.
# If a configuration file was provided and loaded successfully, we start
# the BGPS core using those settings. If no configuration file is provided,
# or if it is missing the minimum required settings, the BGPS core is not
# started.
if self.config_file:
LOG.debug('Loading config. from settings file.')
settings = self.load_config(self.config_file)
# Configure log settings, if available.
if getattr(settings, 'LOGGING', None):
dictConfig(settings.LOGGING)
if getattr(settings, 'BGP', None):
self._start_core(settings)
if getattr(settings, 'SSH', None) is not None:
hub.spawn(ssh.SSH_CLI_CONTROLLER.start, None, **settings.SSH)
# Start Network Controller to serve RPC peers.
t = hub.spawn(net_ctrl.NET_CONTROLLER.start, *[],
**{net_ctrl.NC_RPC_BIND_IP: self.bind_ip,
net_ctrl.NC_RPC_BIND_PORT: self.bind_port})
LOG.debug('Started Network Controller')
super(RyuBGPSpeaker, self).start()
return t
@classmethod
def validate_rpc_ip(cls, ip):
"""Validates given ip for use as rpc host bind address.
"""
if not is_valid_ipv4(ip):
raise ApplicationException(desc='Invalid rpc ip address.')
return ip
@classmethod
def validate_rpc_port(cls, port):
"""Validates give port for use as rpc server port.
"""
if not port:
raise ApplicationException(desc='Invalid rpc port number.')
if isinstance(port, str):
port = int(port)
return port
def load_config(self, config_file):
"""Validates give file as settings file for BGPSpeaker.
Load the configuration from file as settings module.
"""
if not config_file or not isinstance(config_file, str):
raise ApplicationException('Invalid configuration file.')
# Check if file can be read
try:
return imp.load_source('settings', config_file)
except Exception as e:
raise ApplicationException(desc=str(e))
def _start_core(self, settings):
"""Starts BGPS core using setting and given pool.
"""
# Get common settings
routing_settings = settings.BGP.get('routing')
common_settings = {}
# Get required common settings.
try:
common_settings[LOCAL_AS] = routing_settings.pop(LOCAL_AS)
common_settings[ROUTER_ID] = routing_settings.pop(ROUTER_ID)
except KeyError as e:
raise ApplicationException(
desc='Required minimum configuration missing %s' %
e)
# Get optional common settings
common_settings[BGP_SERVER_PORT] = \
routing_settings.get(BGP_SERVER_PORT, DEFAULT_BGP_SERVER_PORT)
common_settings[REFRESH_STALEPATH_TIME] = \
routing_settings.get(REFRESH_STALEPATH_TIME,
DEFAULT_REFRESH_STALEPATH_TIME)
common_settings[REFRESH_MAX_EOR_TIME] = \
routing_settings.get(REFRESH_MAX_EOR_TIME,
DEFAULT_REFRESH_MAX_EOR_TIME)
common_settings[LABEL_RANGE] = \
routing_settings.get(LABEL_RANGE, DEFAULT_LABEL_RANGE)
# Start BGPS core service
waiter = hub.Event()
call('core.start', waiter=waiter, **common_settings)
waiter.wait()
LOG.debug('Core started %s', CORE_MANAGER.started)
# Core manager started add configured neighbor and vrfs
if CORE_MANAGER.started:
# Add neighbors.
self._add_neighbors(routing_settings)
# Add Vrfs.
self._add_vrfs(routing_settings)
# Add Networks
self._add_networks(routing_settings)
def _add_neighbors(self, routing_settings):
"""Add bgp peers/neighbors from given settings to BGPS runtime.
All valid neighbors are loaded. Misconfigured neighbors are ignored
and an error is logged.
"""
bgp_neighbors = routing_settings.setdefault('bgp_neighbors', {})
for ip, bgp_neighbor in bgp_neighbors.items():
try:
bgp_neighbor[neighbors.IP_ADDRESS] = ip
call('neighbor.create', **bgp_neighbor)
LOG.debug('Added neighbor %s', ip)
except RuntimeConfigError as re:
LOG.error(re)
LOG.error(traceback.format_exc())
continue
def _add_vrfs(self, routing_settings):
"""Add VRFs from given settings to BGPS runtime.
If any of the VRFs are misconfigured, errors are logged.
All valid VRFs are loaded.
"""
vpns_conf = routing_settings.setdefault('vpns', {})
for vrfname, vrf in vpns_conf.iteritems():
try:
vrf[vrfs.VRF_NAME] = vrfname
call('vrf.create', **vrf)
LOG.debug('Added vrf %s', vrf)
except RuntimeConfigError as e:
LOG.error(e)
continue
def _add_networks(self, routing_settings):
"""Add networks from given settings to BGPS runtime.
If any of the networks are misconfigured, errors are logged.
All valid networks are loaded.
"""
networks = routing_settings.setdefault('networks', [])
for prefix in networks:
try:
call('network.add', prefix=prefix)
LOG.debug('Added network %s', prefix)
except RuntimeConfigError as e:
LOG.error(e)
continue
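# -----------------------------------------------------------------------------
# Illustrative only: a sketch of the BGP attribute that a --bgp-config-file
# module might define. The key names ('local_as', 'router_id', 'remote_as', ...)
# are assumptions inferred from the rtconf constants used above; consult
# rtconf.common and rtconf.neighbors for the authoritative spelling.
_EXAMPLE_BGP_SETTINGS = {
    'routing': {
        'local_as': 65001,             # required
        'router_id': '10.0.0.1',       # required
        'bgp_neighbors': {
            '10.0.0.2': {'remote_as': 65002},
        },
        'vpns': {},                    # optional VRF definitions
        'networks': ['192.0.2.0/24'],  # optional prefixes to advertise
    },
}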
| 38.888412
| 78
| 0.664938
|
586b214bb70b7ebe5c92c0096f2729499c92996c
| 4,639
|
py
|
Python
|
tests/traffic/test_ip_device_and_flow.py
|
albertovillarreal-keys/snappi-ixnetwork
|
c72673580f5a2c530f033469f542cbea36a49c2c
|
[
"MIT"
] | 4
|
2020-11-03T06:03:48.000Z
|
2021-06-29T03:49:44.000Z
|
tests/traffic/test_ip_device_and_flow.py
|
albertovillarreal-keys/snappi-ixnetwork
|
c72673580f5a2c530f033469f542cbea36a49c2c
|
[
"MIT"
] | 275
|
2020-09-23T15:19:09.000Z
|
2021-06-29T10:53:56.000Z
|
tests/traffic/test_ip_device_and_flow.py
|
albertovillarreal-keys/snappi-ixnetwork
|
c72673580f5a2c530f033469f542cbea36a49c2c
|
[
"MIT"
] | 1
|
2020-10-12T19:33:46.000Z
|
2020-10-12T19:33:46.000Z
|
import pytest
@pytest.mark.e2e
def test_ip_device_and_flow(api, b2b_raw_config, utils):
"""
Configure the devices on the Tx and Rx ports.
Configure the flow with devices as end points.
Run the traffic.
Validation:
- validate the port and flow statistics.
"""
size = 128
packets = 100000
count = 10
mac_tx = utils.mac_or_ip_addr_from_counter_pattern(
"00:10:10:20:20:10", "00:00:00:00:00:01", count, True
)
mac_rx = utils.mac_or_ip_addr_from_counter_pattern(
"00:10:10:20:20:20", "00:00:00:00:00:01", count, False
)
ip_tx = utils.mac_or_ip_addr_from_counter_pattern(
"10.1.1.1", "0.0.1.0", count, True, False
)
ip_rx = utils.mac_or_ip_addr_from_counter_pattern(
"10.1.1.2", "0.0.1.0", count, True, False
)
addrs = {
"mac_tx": mac_tx,
"mac_rx": mac_rx,
"ip_tx": ip_tx,
"ip_rx": ip_rx,
}
for i in range(count * 2):
port = int(i / count)
node = "tx" if port == 0 else "rx"
if i >= count:
i = i - count
dev = b2b_raw_config.devices.device()[-1]
dev.name = "%s_dev_%d" % (node, i + 1)
dev.container_name = b2b_raw_config.ports[port].name
dev.ethernet.name = "%s_eth_%d" % (node, i + 1)
dev.ethernet.mac = addrs["mac_%s" % node][i]
dev.ethernet.ipv4.name = "%s_ipv4_%d" % (node, i + 1)
dev.ethernet.ipv4.address = addrs["ip_%s" % node][i]
dev.ethernet.ipv4.gateway = addrs[
"ip_%s" % ("rx" if node == "tx" else "tx")
][i]
dev.ethernet.ipv4.prefix = 24
f1, f2 = b2b_raw_config.flows.flow(name="TxFlow-2")
f1.name = "TxFlow-1"
f1.tx_rx.device.tx_names = [
b2b_raw_config.devices[i].name for i in range(count)
]
f1.tx_rx.device.rx_names = [
b2b_raw_config.devices[i].name for i in range(count, count * 2)
]
f1.tx_rx.device.mode = f2.tx_rx.device.ONE_TO_ONE
f1.size.fixed = size
f1.duration.fixed_packets.packets = packets
f1.rate.percentage = "10"
f2.tx_rx.device.tx_names = [
b2b_raw_config.devices[i].name for i in range(count)
]
f2.tx_rx.device.rx_names = [
b2b_raw_config.devices[i].name for i in range(count, count * 2)
]
f2.tx_rx.device.mode = f2.tx_rx.device.ONE_TO_ONE
f2.packet.ethernet().ipv4().tcp()
tcp = f2.packet[-1]
tcp.src_port.increment.start = "5000"
tcp.src_port.increment.step = "1"
tcp.src_port.increment.count = "%d" % count
tcp.dst_port.increment.start = "2000"
tcp.dst_port.increment.step = "1"
tcp.dst_port.increment.count = "%d" % count
f2.size.fixed = size * 2
f2.duration.fixed_packets.packets = packets
f2.rate.percentage = "10"
utils.start_traffic(api, b2b_raw_config)
utils.wait_for(
lambda: results_ok(api, utils, size, size * 2, packets),
"stats to be as expected",
timeout_seconds=20,
)
utils.stop_traffic(api, b2b_raw_config)
captures_ok(api, b2b_raw_config, utils, count, packets * 2)
def results_ok(api, utils, size1, size2, packets):
"""
Returns true if stats are as expected, false otherwise.
"""
port_results, flow_results = utils.get_all_stats(api)
frames_ok = utils.total_frames_ok(port_results, flow_results, packets * 2)
bytes_ok = utils.total_bytes_ok(
port_results, flow_results, packets * size1 + packets * size2
)
return frames_ok and bytes_ok
def captures_ok(api, cfg, utils, count, packets):
"""
Returns normally if patterns in captured packets are as expected.
"""
src_mac = [[0x00, 0x10, 0x10, 0x20, 0x20, 0x10 + i] for i in range(count)]
dst_mac = [[0x00, 0x10, 0x10, 0x20, 0x20, 0x20 - i] for i in range(count)]
src_ip = [[0x0A, 0x01, 0x01 + i, 0x01] for i in range(count)]
dst_ip = [[0x0A, 0x01, 0x01 + i, 0x02] for i in range(count)]
src_port = [[0x13, 0x88 + i] for i in range(count)]
dst_port = [[0x07, 0xD0 + i] for i in range(count)]
cap_dict = utils.get_all_captures(api, cfg)
assert len(cap_dict) == 1
sizes = [128, 256]
size_dt = {128: [0 for i in range(count)], 256: [0 for i in range(count)]}
for b in cap_dict[list(cap_dict.keys())[0]]:
i = dst_mac.index(b[0:6])
assert b[0:6] == dst_mac[i] and b[6:12] == src_mac[i]
assert b[26:30] == src_ip[i] and b[30:34] == dst_ip[i]
assert len(b) in sizes
size_dt[len(b)][i] += 1
if len(b) == 256:
assert b[34:36] == src_port[i] and b[36:38] == dst_port[i]
assert sum(size_dt[128]) + sum(size_dt[256]) == packets
| 33.135714
| 78
| 0.610045
|
561bad3a1b7821e1e9f5aee592781d190d7ef5bb
| 8,905
|
py
|
Python
|
tests/03_tic_tac_toe.py
|
ephdtrg/eosfactory
|
721843707cb277618142c6d9518e3f231cae3b79
|
[
"MIT"
] | null | null | null |
tests/03_tic_tac_toe.py
|
ephdtrg/eosfactory
|
721843707cb277618142c6d9518e3f231cae3b79
|
[
"MIT"
] | null | null | null |
tests/03_tic_tac_toe.py
|
ephdtrg/eosfactory
|
721843707cb277618142c6d9518e3f231cae3b79
|
[
"MIT"
] | null | null | null |
import unittest, argparse, sys, time
from eosfactory.eosf import *
verbosity([Verbosity.INFO, Verbosity.OUT, Verbosity.TRACE])
CONTRACT_WORKSPACE = "03_tic_tac_toe"
INITIAL_RAM_KBYTES = 8
INITIAL_STAKE_NET = 3
INITIAL_STAKE_CPU = 3
class Test(unittest.TestCase):
def stats():
print_stats(
[master, host, alice, carol],
[
"core_liquid_balance",
"ram_usage",
"ram_quota",
"total_resources.ram_bytes",
"self_delegated_bandwidth.net_weight",
"self_delegated_bandwidth.cpu_weight",
"total_resources.net_weight",
"total_resources.cpu_weight",
"net_limit.available",
"net_limit.max",
"net_limit.used",
"cpu_limit.available",
"cpu_limit.max",
"cpu_limit.used"
]
)
@classmethod
def setUpClass(cls):
SCENARIO('''
There is the ``master`` account that sponsors the ``host``
account equipped with an instance of the ``tic_tac_toe`` smart contract. There
are two players ``alice`` and ``carol``. We are testing that the moves of
the game are correctly stored in the blockchain database.
''')
testnet.verify_production()
create_master_account("master", testnet)
create_account(
"host", master, buy_ram_kbytes=INITIAL_RAM_KBYTES,
stake_net=INITIAL_STAKE_NET, stake_cpu=INITIAL_STAKE_CPU)
create_account(
"alice", master, buy_ram_kbytes=INITIAL_RAM_KBYTES,
stake_net=INITIAL_STAKE_NET, stake_cpu=INITIAL_STAKE_CPU)
create_account(
"carol", master, buy_ram_kbytes=INITIAL_RAM_KBYTES,
stake_net=INITIAL_STAKE_NET, stake_cpu=INITIAL_STAKE_CPU)
if not testnet.is_local():
cls.stats()
if (extra_ram > 0):
master.buy_ram(extra_ram, host)
master.buy_ram(extra_ram, alice)
master.buy_ram(extra_ram, carol)
if (extra_stake_net > 0 or extra_stake_cpu > 0):
master.delegate_bw(extra_stake_net, extra_stake_cpu, host)
master.delegate_bw(extra_stake_net, extra_stake_cpu, alice)
master.delegate_bw(extra_stake_net, extra_stake_cpu, carol)
if (extra_ram > 0 or extra_stake_net > 0 or extra_stake_cpu > 0):
cls.stats()
contract = Contract(host, CONTRACT_WORKSPACE)
contract.build(force=False)
try:
contract.deploy(payer=master)
except errors.ContractRunningError:
pass
def setUp(self):
pass
def test_01(self):
COMMENT('''
Attempting to create a new game.
This might fail if the previous game has not been closed properly:
''')
try:
host.push_action(
"create",
{
"challenger": alice,
"host": carol
},
permission=(carol, Permission.ACTIVE))
except Error as e:
if "game already exists" in e.message:
COMMENT('''
We need to close the previous game before creating a new one:
''')
host.push_action(
"close",
{
"challenger": alice,
"host": carol
},
permission=(carol, Permission.ACTIVE))
time.sleep(3)
COMMENT('''
Second attempt to create a new game:
''')
host.push_action(
"create",
{
"challenger": alice,
"host": carol
},
permission=(carol, Permission.ACTIVE))
else:
COMMENT('''
The error is different than expected.
''')
raise Error(str(e))
t = host.table("games", carol)
self.assertEqual(t.json["rows"][0]["board"][0], 0)
self.assertEqual(t.json["rows"][0]["board"][1], 0)
self.assertEqual(t.json["rows"][0]["board"][2], 0)
self.assertEqual(t.json["rows"][0]["board"][3], 0)
self.assertEqual(t.json["rows"][0]["board"][4], 0)
self.assertEqual(t.json["rows"][0]["board"][5], 0)
self.assertEqual(t.json["rows"][0]["board"][6], 0)
self.assertEqual(t.json["rows"][0]["board"][7], 0)
self.assertEqual(t.json["rows"][0]["board"][8], 0)
COMMENT('''
First move is by carol:
''')
host.push_action(
"move",
{
"challenger": alice,
"host": carol,
"by": carol,
"row":0, "column":0
},
permission=(carol, Permission.ACTIVE))
COMMENT('''
Second move is by alice:
''')
host.push_action(
"move",
{
"challenger": alice,
"host": carol,
"by": alice,
"row":1, "column":1
},
permission=(alice, Permission.ACTIVE))
t = host.table("games", carol)
self.assertEqual(t.json["rows"][0]["board"][0], 1)
self.assertEqual(t.json["rows"][0]["board"][1], 0)
self.assertEqual(t.json["rows"][0]["board"][2], 0)
self.assertEqual(t.json["rows"][0]["board"][3], 0)
self.assertEqual(t.json["rows"][0]["board"][4], 2)
self.assertEqual(t.json["rows"][0]["board"][5], 0)
self.assertEqual(t.json["rows"][0]["board"][6], 0)
self.assertEqual(t.json["rows"][0]["board"][7], 0)
self.assertEqual(t.json["rows"][0]["board"][8], 0)
COMMENT('''
Restarting the game:
''')
host.push_action(
"restart",
{
"challenger": alice,
"host": carol,
"by": carol
},
permission=(carol, Permission.ACTIVE))
t = host.table("games", carol)
self.assertEqual(t.json["rows"][0]["board"][0], 0)
self.assertEqual(t.json["rows"][0]["board"][1], 0)
self.assertEqual(t.json["rows"][0]["board"][2], 0)
self.assertEqual(t.json["rows"][0]["board"][3], 0)
self.assertEqual(t.json["rows"][0]["board"][4], 0)
self.assertEqual(t.json["rows"][0]["board"][5], 0)
self.assertEqual(t.json["rows"][0]["board"][6], 0)
self.assertEqual(t.json["rows"][0]["board"][7], 0)
self.assertEqual(t.json["rows"][0]["board"][8], 0)
COMMENT('''
Closing the game:
WARNING: This action should fail due to authority mismatch!
''')
with self.assertRaises(MissingRequiredAuthorityError):
host.push_action(
"close",
{
"challenger": alice,
"host": carol
},
permission=(alice, Permission.ACTIVE))
COMMENT('''
Closing the game:
''')
host.push_action(
"close",
{
"challenger": alice,
"host": carol
},
permission=(carol, Permission.ACTIVE))
def tearDown(self):
pass
@classmethod
def tearDownClass(cls):
if testnet.is_local():
stop()
else:
cls.stats()
testnet = None
extra_ram = None
extra_stake_net = None
extra_stake_cpu = None
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='''
This is a unit test for the ``tic-tac-toe`` smart contract.
It works both on a local testnet and remote testnet.
The default option is local testnet.
''')
parser.add_argument(
"alias", nargs="?",
help="Testnet alias")
parser.add_argument(
"-t", "--testnet", nargs=4,
help="<url> <name> <owner key> <active key>")
parser.add_argument(
"-r", "--reset", action="store_true",
help="Reset testnet cache")
parser.add_argument(
"--ram", default=0, help="extra RAM in kbytes")
parser.add_argument(
"--net", default=0, help="extra NET stake in EOS")
parser.add_argument(
"--cpu", default=0, help="extra CPU stake in EOS")
args = parser.parse_args()
testnet = get_testnet(args.alias, args.testnet, reset=args.reset)
testnet.configure()
if args.reset and not testnet.is_local():
testnet.clear_cache()
extra_ram = int(args.ram)
extra_stake_net = int(args.net)
extra_stake_cpu = int(args.cpu)
unittest.main(argv=[sys.argv[0]])
| 31.917563
| 86
| 0.516676
|
f225f37c2a76f130875945482344989c99c42737
| 7,048
|
py
|
Python
|
identity/tests/test_ajax_views.py
|
andressamagblr/vault
|
f37d61b93a96d91278b5d4163f336ada7209240f
|
[
"Apache-2.0"
] | null | null | null |
identity/tests/test_ajax_views.py
|
andressamagblr/vault
|
f37d61b93a96d91278b5d4163f336ada7209240f
|
[
"Apache-2.0"
] | null | null | null |
identity/tests/test_ajax_views.py
|
andressamagblr/vault
|
f37d61b93a96d91278b5d4163f336ada7209240f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
import json
from unittest import TestCase
from unittest.mock import patch
from identity.views import (
ListUserRoleView, AddUserRoleView, DeleteUserRoleView)
from identity.tests.fakes import FakeToken, FakeResource
from vault.tests.fakes import fake_request, UserFactory
class BaseAjaxTestCase(TestCase):
view_class = None
def setUp(self):
self.view = self.view_class.as_view()
self.request = fake_request(method='POST')
self.request.user.token = FakeToken
self.request.user.is_superuser = True
patch('identity.keystone.Keystone._create_keystone_connection').start()
patch('identity.views.log').start()
def tearDown(self):
patch.stopall()
class TestListUserRole(BaseAjaxTestCase):
view_class = ListUserRoleView
def setUp(self):
super(TestListUserRole, self).setUp()
self.mock_user_list = patch(
'identity.keystone.Keystone.user_list').start()
def test_list_user_role_needs_authentication(self):
req = fake_request(method='POST', user=False)
response = self.view(req)
self.assertEqual(response.status_code, 302)
def test_list_user_role_need_to_be_superuser(self):
self.request.user.is_superuser = False
response = self.view(self.request)
msgs = [msg for msg in self.request._messages]
self.assertGreater(len(msgs), 0)
self.assertEqual(response.status_code, 302)
self.assertEqual(msgs[0].message, 'Unauthorized')
def test_list_user_role_response_content_is_json(self):
response = self.view(self.request)
self.assertEqual(response._headers.get(
'content-type')[1], 'application/json')
@patch('identity.views.ListUserRoleView.get_user_roles')
def test_list_user_role_return_sorted_users(self, mock_get_user_roles):
user1 = FakeResource(1)
user1.name = 'ZZZ'
user2 = FakeResource(2)
user2.name = 'BBBB'
user3 = FakeResource(3)
user3.name = 'LLLL'
self.mock_user_list.return_value = [user1, user2, user3]
mock_get_user_roles.return_value = []
post = self.request.POST.copy()
post.update({'project': 1})
self.request.POST = post
response = self.view(self.request)
computed = json.loads(response.content)
computed_names = [x.get('username') for x in computed.get('users')]
expected = sorted([user1.name, user2.name, user3.name])
self.assertEqual(computed_names, expected)
def test_user_list_was_called(self):
post = self.request.POST.copy()
post.update({'project': 1})
self.request.POST = post
self.view(self.request)
self.mock_user_list.assert_called_with(project_id=1)
@patch('identity.views.ListUserRoleView.get_user_roles')
def test_get_user_roles_was_called(self, mock_get_user_roles):
user = FakeResource(1)
user.username = 'User1'
self.mock_user_list.return_value = [user]
mock_get_user_roles.return_value = []
post = self.request.POST.copy()
post.update({'project': 1})
self.request.POST = post
self.view(self.request)
mock_get_user_roles.assert_called_with(user, 1)
@patch('identity.views.ListUserRoleView.get_user_roles')
def test_get_user_roles_exception(self, mock_get_user_roles):
mock_get_user_roles.side_effect = Exception()
mock_get_user_roles.return_value = []
user = FakeResource(1)
user.username = 'User1'
self.mock_user_list.return_value = [user]
post = self.request.POST.copy()
post.update({'project': 1})
self.request.POST = post
response = self.view(self.request)
self.assertEqual(response.status_code, 500)
class TestAddUserRole(BaseAjaxTestCase):
view_class = AddUserRoleView
def test_add_user_role_needs_authentication(self):
req = fake_request(method='POST', user=False)
self.request.user.token = None
response = self.view(req)
self.assertEqual(response.status_code, 302)
def test_add_user_role_need_to_be_superuser(self):
self.request.user.is_superuser = False
response = self.view(self.request)
msgs = [msg for msg in self.request._messages]
self.assertGreater(len(msgs), 0)
self.assertEqual(response.status_code, 302)
self.assertEqual(msgs[0].message, 'Unauthorized')
@patch('identity.keystone.Keystone.add_user_role')
def test_add_user_role_response_content_is_json(self, mock_add_user_role):
response = self.view(self.request)
self.assertEqual(response._headers.get(
'content-type')[1], 'application/json')
@patch('identity.keystone.Keystone.add_user_role')
def test_add_user_role_was_called(self, mock_add_user_role):
post = self.request.POST.copy()
post.update({'project': 1, 'role': 1, 'user': 1})
self.request.POST = post
self.view(self.request)
mock_add_user_role.assert_called_with(project=1, role=1, user=1)
@patch('identity.keystone.Keystone.add_user_role')
def test_add_user_role_exception(self, mock_add_user_role):
mock_add_user_role.side_effect = Exception()
response = self.view(self.request)
self.assertEqual(response.status_code, 500)
class TestDeleteUserRole(BaseAjaxTestCase):
view_class = DeleteUserRoleView
def test_delete_user_role_needs_authentication(self):
req = fake_request(method='POST', user=False)
self.request.user.token = None
response = self.view(req)
self.assertEqual(response.status_code, 302)
def test_delete_user_role_need_to_be_superuser(self):
self.request.user.is_superuser = False
response = self.view(self.request)
msgs = [msg for msg in self.request._messages]
self.assertGreater(len(msgs), 0)
self.assertEqual(response.status_code, 302)
self.assertEqual(msgs[0].message, 'Unauthorized')
@patch('identity.keystone.Keystone.remove_user_role')
def test_delete_user_role_response_content_is_json(self, mock_remove_user_role):
response = self.view(self.request)
self.assertEqual(response._headers.get(
'content-type')[1], 'application/json')
@patch('identity.keystone.Keystone.remove_user_role')
def test_remove_user_role_was_called(self, mock_remove_user_role):
post = self.request.POST.copy()
post.update({'project': 1, 'role': 1, 'user': 1})
self.request.POST = post
self.view(self.request)
mock_remove_user_role.assert_called_with(project=1, role=1, user=1)
@patch('identity.keystone.Keystone.remove_user_role')
def test_remove_user_role_exception(self, mock_remove_user_role):
mock_remove_user_role.side_effect = Exception()
response = self.view(self.request)
self.assertEqual(response.status_code, 500)
| 31.891403
| 84
| 0.684875
|
9d1fbeecc7c354dc0d4e46dbed0f07b06d67f604
| 6,290
|
py
|
Python
|
models/deeplab/backbone/resnet.py
|
DLWK/CGRNet
|
a9a65fa192cc9888e7861755313b8b3ac80fa512
|
[
"MIT"
] | 1
|
2022-03-29T06:32:34.000Z
|
2022-03-29T06:32:34.000Z
|
models/deeplab/backbone/resnet.py
|
DLWK/CGRNet
|
a9a65fa192cc9888e7861755313b8b3ac80fa512
|
[
"MIT"
] | null | null | null |
models/deeplab/backbone/resnet.py
|
DLWK/CGRNet
|
a9a65fa192cc9888e7861755313b8b3ac80fa512
|
[
"MIT"
] | null | null | null |
import math
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from models.deeplab.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, BatchNorm=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = BatchNorm(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
dilation=dilation, padding=dilation, bias=False)
self.bn2 = BatchNorm(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = BatchNorm(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, output_stride, BatchNorm, pretrained=True):
self.inplanes = 64
super(ResNet, self).__init__()
blocks = [1, 2, 4]
if output_stride == 16:
strides = [1, 2, 2, 1]
dilations = [1, 1, 1, 2]
elif output_stride == 8:
strides = [1, 2, 1, 1]
dilations = [1, 1, 2, 4]
else:
raise NotImplementedError
# Modules
self.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = BatchNorm(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], stride=strides[0], dilation=dilations[0], BatchNorm=BatchNorm)
self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], dilation=dilations[1], BatchNorm=BatchNorm)
self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], dilation=dilations[2], BatchNorm=BatchNorm)
self.layer4 = self._make_MG_unit(block, 512, blocks=blocks, stride=strides[3], dilation=dilations[3], BatchNorm=BatchNorm)
# self.layer4 = self._make_layer(block, 512, layers[3], stride=strides[3], dilation=dilations[3], BatchNorm=BatchNorm)
self._init_weight()
if pretrained:
self._load_pretrained_model()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1, BatchNorm=None):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
BatchNorm(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, dilation, downsample, BatchNorm))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation, BatchNorm=BatchNorm))
return nn.Sequential(*layers)
def _make_MG_unit(self, block, planes, blocks, stride=1, dilation=1, BatchNorm=None):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
BatchNorm(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, dilation=blocks[0]*dilation,
downsample=downsample, BatchNorm=BatchNorm))
self.inplanes = planes * block.expansion
for i in range(1, len(blocks)):
layers.append(block(self.inplanes, planes, stride=1,
dilation=blocks[i]*dilation, BatchNorm=BatchNorm))
return nn.Sequential(*layers)
def forward(self, input):
x = self.conv1(input)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
low_level_feat = x
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x, low_level_feat
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, SynchronizedBatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _load_pretrained_model(self):
pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/resnet101-5d3b4d8f.pth')
model_dict = {}
state_dict = self.state_dict()
for k, v in pretrain_dict.items():
if k in state_dict:
model_dict[k] = v
state_dict.update(model_dict)
self.load_state_dict(state_dict)
def ResNet101(output_stride, BatchNorm, pretrained=True):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
    model = ResNet(Bottleneck, [3, 4, 23, 3], output_stride, BatchNorm, pretrained=False)  # note: the `pretrained` argument is not forwarded; the single-channel conv1 above would not match the ImageNet checkpoint
# model.features[0]=nn.Conv2d(1, 64, kernel_size=3, padding=1)
return model
if __name__ == "__main__":
import torch
model = ResNet101(BatchNorm=nn.BatchNorm2d, pretrained=True, output_stride=8)
    input = torch.rand(1, 1, 512, 512)  # conv1 above is defined with a single input channel
output, low_level_feat = model(input)
print(output.size())
print(low_level_feat.size())
| 38.588957
| 130
| 0.603021
|
594b5ee880c3cdd827a4630da80f34d9ce69b160
| 3,221
|
py
|
Python
|
thespian/test/test_initmsgs.py
|
dendron2000/Thespian
|
0acbc5a0803f6d2be3421ea6eb08c6beecbf3802
|
[
"MIT"
] | 210
|
2015-08-31T19:39:34.000Z
|
2020-01-10T08:07:48.000Z
|
thespian/test/test_initmsgs.py
|
dendron2000/Thespian
|
0acbc5a0803f6d2be3421ea6eb08c6beecbf3802
|
[
"MIT"
] | 85
|
2017-04-08T19:28:42.000Z
|
2022-03-23T15:25:49.000Z
|
thespian/test/test_initmsgs.py
|
dendron2000/Thespian
|
0acbc5a0803f6d2be3421ea6eb08c6beecbf3802
|
[
"MIT"
] | 47
|
2015-09-01T19:24:20.000Z
|
2020-01-02T20:03:05.000Z
|
from time import sleep
from datetime import timedelta
from thespian.test import *
from thespian.actors import *
from thespian.transient import transient, transient_idle
from thespian.initmsgs import initializing_messages
from thespian.troupe import troupe
max_ask_wait = timedelta(milliseconds=250)
class Msg1(object): pass
class Msg2(object): pass
class Msg3(object): pass
class Msg4(object): pass
@initializing_messages([('i_msg1', Msg1, True),
('i_msg2', Msg2),
('i_msg3', str),
], 'init_done')
class Actor1(ActorTypeDispatcher):
def init_done(self):
self.send(self.i_msg1_sender, 'init is done')
def receiveMsg_str(self, strmsg, sender):
self.send(self.i_msg1_sender, 's:'+strmsg)
def receiveMsg_Msg3(self, msg3, sender):
self.send(sender, self.i_msg2)
def receiveMsg_Msg4(self, msg4, sender):
self.send(self.i_msg1_sender, msg4)
self.send(sender, self.i_msg3)
@initializing_messages([('proxy', ActorAddress, True)])
class ProxyActor(Actor):
def receiveMessage(self, msg, sender):
if not isinstance(msg, ActorSystemMessage):
if sender == self.proxy_sender:
self.send(self.proxy, msg)
else:
self.send(self.proxy_sender, msg)
def test_simpleinit(asys):
t1 = asys.createActor(ProxyActor)
asys.tell(t1, asys.createActor(Actor1))
asys.tell(t1, Msg1())
r = asys.ask(t1, "ready?", max_ask_wait)
assert r is None
r = asys.ask(t1, Msg2(), max_ask_wait)
assert r == "init is done"
r = asys.ask(t1, "running?", max_ask_wait)
assert r == "s:running?"
m4 = Msg4()
r = asys.ask(t1, m4, max_ask_wait)
r2 = asys.listen(max_ask_wait)
if r == "ready?":
assert r == "ready?"
assert isinstance(r2, Msg4)
else:
assert isinstance(r, Msg4)
assert r2 == "ready?"
asys.shutdown()
@initializing_messages([('i_msg1', Msg1, True),
('i_msg2', Msg2),
('i_msg3', str),
], 'init_done')
@transient(timedelta(seconds=1))
class Actor2(ActorTypeDispatcher):
def init_done(self):
self.send(self.i_msg1_sender, self.i_msg3)
def receiveMsg_Msg3(self, msg3, sender):
self.send(sender, self.i_msg2)
def test_init_transient(asys):
t1 = asys.createActor(ProxyActor)
asys.tell(t1, asys.createActor(Actor2))
asys.tell(t1, Msg1())
r = asys.ask(t1, "ready?", max_ask_wait)
assert r is None
r = asys.ask(t1, Msg2(), max_ask_wait)
assert r == "ready?"
r = asys.ask(t1, Msg3(), max_ask_wait)
assert isinstance(r, Msg2)
asys.tell(t1, Msg3())
r = asys.ask(t1, Msg3(), max_ask_wait)
assert isinstance(r, Msg2)
r = asys.listen(max_ask_wait)
assert isinstance(r, Msg2)
r = asys.listen(max_ask_wait)
assert r is None
r = asys.ask(t1, Msg3(), max_ask_wait)
assert isinstance(r, Msg2)
# n.b. if the system is slow such that it takes more than 1 second
# to reach this point, this test will have a false failure.
sleep(1.1)
r = asys.ask(t1, Msg3(), max_ask_wait)
assert r is None
| 29.018018
| 70
| 0.629618
|
b2d11b56b33091986707bfae069602b462ba9da2
| 342
|
py
|
Python
|
froide_exam/migrations/0008_remove_curriculum_jurisdiction.py
|
okfde/froide-exam
|
f4033dc6a7a2687089931b217f4d4bcfa013ae7a
|
[
"MIT"
] | 1
|
2018-11-28T15:34:29.000Z
|
2018-11-28T15:34:29.000Z
|
froide_exam/migrations/0008_remove_curriculum_jurisdiction.py
|
okfde/froide-exam
|
f4033dc6a7a2687089931b217f4d4bcfa013ae7a
|
[
"MIT"
] | 2
|
2021-01-18T14:28:49.000Z
|
2021-03-16T11:55:55.000Z
|
froide_exam/migrations/0008_remove_curriculum_jurisdiction.py
|
okfde/froide-exam
|
f4033dc6a7a2687089931b217f4d4bcfa013ae7a
|
[
"MIT"
] | 1
|
2019-05-01T12:34:55.000Z
|
2019-05-01T12:34:55.000Z
|
# Generated by Django 2.1.5 on 2019-02-08 11:11
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('froide_exam', '0007_auto_20190208_1158'),
]
operations = [
migrations.RemoveField(
model_name='curriculum',
name='jurisdiction',
),
]
| 19
| 51
| 0.608187
|
124661b3adc5e1f7b7b960b71645dbb419466d53
| 520
|
py
|
Python
|
env/Lib/site-packages/plotly/validators/funnel/hoverlabel/_namelength.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
venv/Lib/site-packages/plotly/validators/funnel/hoverlabel/_namelength.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
venv/Lib/site-packages/plotly/validators/funnel/hoverlabel/_namelength.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
import _plotly_utils.basevalidators
class NamelengthValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(
self, plotly_name="namelength", parent_name="funnel.hoverlabel", **kwargs
):
super(NamelengthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
min=kwargs.pop("min", -1),
**kwargs
)
| 32.5
| 81
| 0.636538
|
7209738e0aed65bab829a47e73824aa218c04729
| 4,840
|
py
|
Python
|
frappe/widgets/form/meta.py
|
cadencewatches/frappe
|
d9dcf132a10d68b2dcc80ef348e6d967f1e44084
|
[
"MIT"
] | null | null | null |
frappe/widgets/form/meta.py
|
cadencewatches/frappe
|
d9dcf132a10d68b2dcc80ef348e6d967f1e44084
|
[
"MIT"
] | null | null | null |
frappe/widgets/form/meta.py
|
cadencewatches/frappe
|
d9dcf132a10d68b2dcc80ef348e6d967f1e44084
|
[
"MIT"
] | 1
|
2018-03-21T15:51:46.000Z
|
2018-03-21T15:51:46.000Z
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# metadata
from __future__ import unicode_literals
import frappe, os
from frappe.utils import cstr, cint
from frappe.model.meta import Meta
from frappe.modules import scrub, get_module_path
from frappe.model.workflow import get_workflow_name
######
def get_meta(doctype, cached=True):
if cached:
meta = frappe.cache().get_value("form_meta:" + doctype, lambda: FormMeta(doctype))
else:
meta = FormMeta(doctype)
if frappe.local.lang != 'en':
meta.set("__messages", frappe.get_lang_dict("doctype", doctype))
return meta
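# Illustrative usage sketch (the doctype name below is hypothetical):
#   meta = get_meta("ToDo")                # served from the cache when available
#   meta = get_meta("ToDo", cached=False)  # rebuilt from the DocType definition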
class FormMeta(Meta):
def __init__(self, doctype):
super(FormMeta, self).__init__(doctype)
self.load_assets()
def load_assets(self):
self.add_search_fields()
if not self.istable:
self.add_linked_with()
self.add_code()
self.load_print_formats()
self.load_workflows()
def as_dict(self, no_nulls=False):
d = super(FormMeta, self).as_dict(no_nulls=no_nulls)
for k in ("__js", "__css", "__list_js", "__calendar_js", "__map_js", "__linked_with", "__messages"):
d[k] = self.get(k)
for i, df in enumerate(d.get("fields")):
for k in ("link_doctype", "search_fields"):
df[k] = self.get("fields")[i].get(k)
return d
def add_code(self):
path = os.path.join(get_module_path(self.module), 'doctype', scrub(self.name))
def _get_path(fname):
return os.path.join(path, scrub(fname))
self._add_code(_get_path(self.name + '.js'), '__js')
self._add_code(_get_path(self.name + '.css'), "__css")
self._add_code(_get_path(self.name + '_list.js'), '__list_js')
self._add_code(_get_path(self.name + '_calendar.js'), '__calendar_js')
self._add_code(_get_path(self.name + '_map.js'), '__map_js')
self.add_custom_script()
self.add_code_via_hook("doctype_js", "__js")
def _add_code(self, path, fieldname):
js = frappe.read_file(path)
if js:
self.set(fieldname, (self.get(fieldname) or "") + "\n\n" + render_jinja(js))
def add_code_via_hook(self, hook, fieldname):
hook = "{}:{}".format(hook, self.name)
for app_name in frappe.get_installed_apps():
for file in frappe.get_hooks(hook, app_name=app_name):
path = frappe.get_app_path(app_name, *file.strip("/").split("/"))
self._add_code(path, fieldname)
def add_custom_script(self):
"""embed all require files"""
# custom script
custom = frappe.db.get_value("Custom Script", {"dt": self.name,
"script_type": "Client"}, "script") or ""
self.set("__js", (self.get('__js') or '') + "\n\n" + custom)
def add_search_fields(self):
"""add search fields found in the doctypes indicated by link fields' options"""
for df in self.get("fields", {"fieldtype": "Link", "options":["!=", "[Select]"]}):
if df.options:
search_fields = frappe.get_meta(df.options).search_fields
if search_fields:
df.search_fields = map(lambda sf: sf.strip(), search_fields.split(","))
def add_linked_with(self):
"""add list of doctypes this doctype is 'linked' with"""
links = frappe.db.sql("""select parent, fieldname from tabDocField
where (fieldtype="Link" and options=%s)
or (fieldtype="Select" and options=%s)""", (self.name, "link:"+ self.name))
links += frappe.db.sql("""select dt as parent, fieldname from `tabCustom Field`
where (fieldtype="Link" and options=%s)
or (fieldtype="Select" and options=%s)""", (self.name, "link:"+ self.name))
links = dict(links)
if not links:
return {}
ret = {}
for dt in links:
ret[dt] = { "fieldname": links[dt] }
for grand_parent, options in frappe.db.sql("""select parent, options from tabDocField
where fieldtype="Table"
and options in (select name from tabDocType
where istable=1 and name in (%s))""" % ", ".join(["%s"] * len(links)) ,tuple(links)):
ret[grand_parent] = {"child_doctype": options, "fieldname": links[options] }
if options in ret:
del ret[options]
self.set("__linked_with", ret)
def load_print_formats(self):
frappe.response.docs.extend(frappe.db.sql("""select * FROM `tabPrint Format`
WHERE doc_type=%s AND docstatus<2""", (self.name,), as_dict=1, update={"doctype":"Print Format"}))
def load_workflows(self):
# get active workflow
workflow_name = get_workflow_name(self.name)
if workflow_name and frappe.db.exists("Workflow", workflow_name):
workflow = frappe.get_doc("Workflow", workflow_name)
frappe.response.docs.append(workflow)
for d in workflow.get("workflow_document_states"):
frappe.response.docs.append(frappe.get_doc("Workflow State", d.state))
def render_jinja(content):
if "{% include" in content:
content = frappe.get_jenv().from_string(content).render()
return content
| 32.483221
| 102
| 0.695455
|
da63617147e3d60a684d58fee8fef08b345b9e84
| 6,366
|
py
|
Python
|
voice_based_email_for_blind.py
|
vantage-ola/voice-based-email-for-blind
|
3764370b852fcd554ec2f4d033185d3e11cb21d1
|
[
"MIT"
] | 1
|
2021-07-12T04:54:53.000Z
|
2021-07-12T04:54:53.000Z
|
voice_based_email_for_blind.py
|
vantage-ola/voice-based-email-for-blind
|
3764370b852fcd554ec2f4d033185d3e11cb21d1
|
[
"MIT"
] | null | null | null |
voice_based_email_for_blind.py
|
vantage-ola/voice-based-email-for-blind
|
3764370b852fcd554ec2f4d033185d3e11cb21d1
|
[
"MIT"
] | null | null | null |
import speech_recognition as sr
import smtplib
# import pyaudio
# import platform
# import sys
from bs4 import BeautifulSoup
import email
import imaplib
from gtts import gTTS
import pyglet
import os, time
#pyglet.lib.load_library('avbin')
#pyglet.have_avbin=True
# Project: Voice based Email for blind
# Author: Sayak Naskar
#fetch project name
tts = gTTS(text="Project: Voice based Email for blind", lang='en')
ttsname=("name.mp3") #Example: path -> C:\Users\sayak\Desktop> just change with your desktop directory. Don't use my directory.
tts.save(ttsname)
music = pyglet.media.load(ttsname, streaming = False)
music.play()
time.sleep(music.duration)
os.remove(ttsname)
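# Note: the save/play/remove pattern above is repeated for every prompt below.
# A minimal helper sketch (the name `speak` is not part of the original script)
# could wrap it; it is shown here only as an illustration and is never called.
def speak(text, filename="speech.mp3"):
    """Render `text` with gTTS, play it with pyglet, then delete the temp file."""
    gTTS(text=text, lang='en').save(filename)
    clip = pyglet.media.load(filename, streaming=False)
    clip.play()
    time.sleep(clip.duration)
    os.remove(filename)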
#login from os
login = os.getlogin
print ("You are logging from : "+login())
#choices
print ("1. composed a mail.")
tts = gTTS(text="option 1. composed a mail.", lang='en')
ttsname=("hello.mp3") #Example: path -> C:\Users\sayak\Desktop> just change with your desktop directory. Don't use my directory.
tts.save(ttsname)
music = pyglet.media.load(ttsname, streaming = False)
music.play()
time.sleep(music.duration)
os.remove(ttsname)
print ("2. Check your inbox")
tts = gTTS(text="option 2. Check your inbox", lang='en')
ttsname=("second.mp3")
tts.save(ttsname)
music = pyglet.media.load(ttsname, streaming = False)
music.play()
time.sleep(music.duration)
os.remove(ttsname)
#this is for input choices
tts = gTTS(text="Your choice ", lang='en')
ttsname=("hello.mp3") #Example: path -> C:\Users\sayak\Desktop> just change with your desktop directory. Don't use my directory.
tts.save(ttsname)
music = pyglet.media.load(ttsname, streaming = False)
music.play()
time.sleep(music.duration)
os.remove(ttsname)
#voice recognition part
r = sr.Recognizer()
with sr.Microphone() as source:
print ("Your choice:")
audio=r.listen(source)
print ("ok done!!")
try:
text=r.recognize_google(audio)
print ("You said : "+text)
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio.")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
#choices details
if text == '1' or text == 'One' or text == 'one':
r = sr.Recognizer() #recognize
with sr.Microphone() as source:
print ("Your message :")
audio=r.listen(source)
print ("ok done!!")
try:
text1=r.recognize_google(audio)
print ("You said : "+text1)
msg = text1
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio.")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
mail = smtplib.SMTP('smtp.gmail.com',587) #host and port area
mail.ehlo() #Hostname to send for this command defaults to the FQDN of the local host.
mail.starttls() #security connection
mail.login('emailID','pswrd') #login part
mail.sendmail('emailID','victimID',msg) #send part
print ("Congrates! Your mail has send. ")
tts = gTTS(text="Congrates! Your mail has send. ", lang='en')
ttsname=("send.mp3") #Example: path -> C:\Users\sayak\Desktop> just change with your desktop directory. Don't use my directory.
tts.save(ttsname)
music = pyglet.media.load(ttsname, streaming = False)
music.play()
time.sleep(music.duration)
os.remove(ttsname)
mail.close()
if text == '2' or text == 'tu' or text == 'two' or text == 'Tu' or text == 'to' or text == 'To' :
mail = imaplib.IMAP4_SSL('imap.gmail.com',993) #this is host and port area.... ssl security
unm = ('your mail or victim mail') #username
psw = ('pswrd') #password
mail.login(unm,psw) #login
stat, total = mail.select('Inbox') #total number of mails in inbox
print ("Number of mails in your inbox :"+str(total))
tts = gTTS(text="Total mails are :"+str(total), lang='en') #voice out
ttsname=("total.mp3") #Example: path -> C:\Users\sayak\Desktop> just change with your desktop directory. Don't use my directory.
tts.save(ttsname)
music = pyglet.media.load(ttsname, streaming = False)
music.play()
time.sleep(music.duration)
os.remove(ttsname)
#unseen mails
unseen = mail.search(None, 'UnSeen') # unseen count
print ("Number of UnSeen mails :"+str(unseen))
tts = gTTS(text="Your Unseen mail :"+str(unseen), lang='en')
ttsname=("unseen.mp3") #Example: path -> C:\Users\sayak\Desktop> just change with your desktop directory. Don't use my directory.
tts.save(ttsname)
music = pyglet.media.load(ttsname, streaming = False)
music.play()
time.sleep(music.duration)
os.remove(ttsname)
#search mails
result, data = mail.uid('search',None, "ALL")
inbox_item_list = data[0].split()
new = inbox_item_list[-1]
old = inbox_item_list[0]
result2, email_data = mail.uid('fetch', new, '(RFC822)') #fetch
raw_email = email_data[0][1].decode("utf-8") #decode
email_message = email.message_from_string(raw_email)
print ("From: "+email_message['From'])
print ("Subject: "+str(email_message['Subject']))
tts = gTTS(text="From: "+email_message['From']+" And Your subject: "+str(email_message['Subject']), lang='en')
ttsname=("mail.mp3") #Example: path -> C:\Users\sayak\Desktop> just change with your desktop directory. Don't use my directory.
tts.save(ttsname)
music = pyglet.media.load(ttsname, streaming = False)
music.play()
time.sleep(music.duration)
os.remove(ttsname)
#Body part of mails
stat, total1 = mail.select('Inbox')
stat, data1 = mail.fetch(total1[0], "(UID BODY[TEXT])")
msg = data1[0][1]
soup = BeautifulSoup(msg, "html.parser")
txt = soup.get_text()
print ("Body :"+txt)
tts = gTTS(text="Body: "+txt, lang='en')
ttsname=("body.mp3") #Example: path -> C:\Users\sayak\Desktop> just change with your desktop directory. Don't use my directory.
tts.save(ttsname)
music = pyglet.media.load(ttsname, streaming = False)
music.play()
time.sleep(music.duration)
os.remove(ttsname)
mail.close()
mail.logout()
| 36.170455
| 134
| 0.657713
|
5f7c1d82cfdc875958c6cb017467529c954e86ba
| 20,627
|
py
|
Python
|
erniekit/utils/util_helper.py
|
PaddlePaddle/LARK
|
94a2367ba7f0f83b48330233450ea095d8dc9382
|
[
"Apache-2.0"
] | 1,552
|
2019-03-03T19:52:07.000Z
|
2019-07-19T06:47:57.000Z
|
erniekit/utils/util_helper.py
|
PaddlePaddle/LARK
|
94a2367ba7f0f83b48330233450ea095d8dc9382
|
[
"Apache-2.0"
] | 154
|
2019-03-06T08:19:57.000Z
|
2019-07-19T02:52:22.000Z
|
erniekit/utils/util_helper.py
|
PaddlePaddle/LARK
|
94a2367ba7f0f83b48330233450ea095d8dc9382
|
[
"Apache-2.0"
] | 382
|
2019-03-04T13:37:01.000Z
|
2019-07-19T06:33:44.000Z
|
# -*- coding: utf-8 -*
"""import"""
import collections
import json
import unicodedata
from collections import OrderedDict
import six
from ..common.rule import MaxTruncation
from . import params
import os, tarfile
def append_name(name, postfix):
""" append name with postfix """
if name is None:
ret = None
elif name == '':
ret = postfix
else:
ret = '%s_%s' % (name, postfix)
return ret
def parse_data_config(config_path):
"""truncate_seq_pair
:param config_path:
:return:
"""
try:
with open(config_path) as json_file:
config_dict = json.load(json_file, object_pairs_hook=OrderedDict)
except Exception:
raise IOError("Error in parsing Ernie model config file '%s'" % config_path)
else:
return config_dict
def parse_version_code(version_str, default_version_code=1.5):
"""
parser paddle fluid version code to float type
:param version_str:
:param default_version_code:
:return:
"""
if version_str:
v1 = version_str.split(".")[0:2]
v_code_str = ".".join(v1)
v_code = float(v_code_str)
return v_code
else:
return default_version_code
def truncation_words(words, max_seq_length, truncation_type):
"""
:param words:
:param max_seq_length:
:param truncation_type:
:return:
"""
if len(words) > max_seq_length:
if truncation_type == MaxTruncation.KEEP_HEAD:
words = words[0: max_seq_length]
elif truncation_type == MaxTruncation.KEEP_TAIL:
tmp = words[0: max_seq_length - 1]
tmp.append(words[-1])
words = tmp
elif truncation_type == MaxTruncation.KEEP_BOTH_HEAD_TAIL:
tmp = words[1: max_seq_length - 2]
tmp.insert(0, words[0])
tmp.insert(max_seq_length - 1, words[-1])
words = tmp
else:
words = words[0: max_seq_length]
return words
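# Illustrative example (assuming the MaxTruncation constants KEEP_HEAD, KEEP_TAIL
# and KEEP_BOTH_HEAD_TAIL): with words = list("abcdef") and max_seq_length = 4,
# KEEP_HEAD keeps ['a', 'b', 'c', 'd'], KEEP_TAIL keeps ['a', 'b', 'c', 'f'], and
# KEEP_BOTH_HEAD_TAIL keeps the first token, the middle slice and the last token,
# i.e. ['a', 'b', 'f'].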
def truncate_seq_pair(tokens_a, tokens_b, max_length):
"""
:param tokens_a:
    :param tokens_b:
:param max_length:
:return:
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
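# Worked example (illustrative): truncate_seq_pair(list("ABCDEFG"), list("XYZ"), 6)
# pops from the longer list in place until the combined length is 6, leaving
# ['A', 'B', 'C'] and ['X', 'Y', 'Z'].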
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a peice of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
def is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
    # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
def clean_text(text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or is_control(char):
continue
if is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
def printable_text(text):
"""Returns text encoded in a way suitable for print or `tf.logging`."""
# These functions want `str` for both Python2 and Python3, but in one case
# it's a Unicode string and in the other it's a byte string.
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode("utf-8")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def save_meta_data(data_dict, save_file, mode="add"):
"""
:param data_dict:
:param save_file:
    :param mode: save mode, either "override" or "add"
:return:
"""
    # If the target file already exists and mode is "add", read the existing dict, update it with the new dict, then save
if os.path.exists(save_file) and mode == "add":
meta_dict = params.from_file(save_file)
_meta = params.replace_none(meta_dict)
_meta.update(data_dict)
json_str = json.dumps(_meta)
else:
json_str = json.dumps(data_dict)
with open(save_file, 'w') as json_file:
json_file.write(json_str)
def get_model_paths(path_checkpoint, path_inference_model, steps, need_encryption=False):
""" 通过step和trainer_param配置中的output路径,计算出模型存储时需要用到的所有路径
:param path_checkpoint:
:param path_inference_model:
:param steps:
:param need_encryption:
:return:
"""
suffix = ""
infer_meta_name = "infer_data_params.json"
model_meta_name = "model.meta"
if need_encryption:
suffix = "_enc"
    # Original save path; when encryption is not needed it is the same as the final model save path
checkpoint_original_name = "checkpoints_step_" + str(steps)
checkpoint_original_model_path = os.path.join(path_checkpoint, checkpoint_original_name)
checkpoint_name = "checkpoints_step_" + str(steps) + suffix
checkpoint_meta_path = os.path.join(path_checkpoint, checkpoint_name, model_meta_name)
checkpoint_model_path = os.path.join(path_checkpoint, checkpoint_name)
checkpoint_infer_meta_path = os.path.join(path_checkpoint, checkpoint_name, infer_meta_name)
checkpoint_irepo_meta_path = os.path.join(path_checkpoint, checkpoint_name + ".meta")
inference_original_name = "inference_step_" + str(steps)
inference_original_model_path = os.path.join(path_inference_model, inference_original_name)
inference_name = "inference_step_" + str(steps) + suffix
inference_meta_path = os.path.join(path_inference_model, inference_name, model_meta_name)
inference_model_path = os.path.join(path_inference_model, inference_name)
inference_infer_meta_path = os.path.join(path_inference_model, inference_name, infer_meta_name)
inference_irepo_meta_path = os.path.join(path_inference_model, inference_name + ".meta")
path_dict = collections.OrderedDict()
path_dict["checkpoints_name"] = checkpoint_name
path_dict["checkpoints_original_name"] = checkpoint_original_name
path_dict["checkpoints_original_model_path"] = checkpoint_original_model_path
path_dict["checkpoints_model_path"] = checkpoint_model_path
path_dict["checkpoints_meta_path"] = checkpoint_meta_path
path_dict["checkpoints_infer_meta_path"] = checkpoint_infer_meta_path
path_dict["checkpoints_irepo_meta_path"] = checkpoint_irepo_meta_path
path_dict["inference_name"] = inference_name
path_dict["inference_original_name"] = inference_original_name
path_dict["inference_original_model_path"] = inference_original_model_path
path_dict["inference_model_path"] = inference_model_path
path_dict["inference_meta_path"] = inference_meta_path
path_dict["inference_infer_meta_path"] = inference_infer_meta_path
path_dict["inference_irepo_meta_path"] = inference_irepo_meta_path
return path_dict
def format_convert_bio(dir_path, vocab_path=None):
"""return"""
def is_alphabet_or_digit(c):
"""return"""
alphabet = list(u"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
digit = list(u"0123456789.")
if c in alphabet or c in digit:
return True
return False
vocab_map = collections.OrderedDict()
count = 0
filelist = os.listdir(dir_path)
for file_path in filelist:
if file_path.endswith(".txt"):
file_path = os.path.join(dir_path, file_path)
with open(file_path, "r") as fp1:
with open(file_path + "_bio", "w") as fp2:
for line in fp1:
try:
tokens, triple, offset = line.strip("\n").split("\t")
_, _, predicate = triple.split(" ")
subject_start, subject_end, object_start, object_end = offset.split(" ")
except Exception:
print(line.strip("\n"))
continue
tokens = list(convert_to_unicode(tokens))
labels = ["O"] * len(tokens)
labels[int(subject_start)] = "B-" + predicate + "@" + "S"
for i in range(int(subject_start) + 1, int(subject_end) + 1):
labels[i] = "I"
if not ("B-" + predicate + "@" + "S") in vocab_map:
vocab_map["B-" + predicate + "@" + "S"] = count
count += 1
labels[int(object_start)] = "B-" + predicate + "@" + "O"
for i in range(int(object_start) + 1, int(object_end) + 1):
labels[i] = "I"
if not ("B-" + predicate + "@" + "O") in vocab_map:
vocab_map["B-" + predicate + "@" + "O"] = count
count += 1
# sub_tokens = []
# sub_labels = []
# sub_token = ""
# sub_label = ""
# is_first = True
# for i in range(len(tokens)):
# if is_alphabet_or_digit(tokens[i]):
# sub_token += tokens[i]
# if is_first:
# sub_label = labels[i]
# is_first = False
# else:
# if sub_token != "":
# sub_tokens.append(sub_token)
# sub_labels.append(sub_label)
# sub_token = ""
# is_first = True
# sub_tokens.append(tokens[i])
# sub_labels.append(labels[i])
# if sub_token != "":
# sub_tokens.append(sub_token)
# sub_labels.append(sub_label)
# if len(sub_tokens) != len(sub_labels) or u"" in sub_tokens:
# print("Hello", "*****")
# continue
fp2.write(" ".join(tokens) + "\t")
fp2.write(" ".join(labels) + "\n")
os.remove(file_path)
vocab_map["I"] = count
vocab_map["O"] = count + 1
# if vocab_path:
# with open(vocab_path, "w") as fp3:
# for key in vocab_map.keys():
# fp3.write(key + "\t" + str(vocab_map[key]) + "\n")
# return len(vocab_map)
return None
def make_targz(output_filename, source_dir):
"""压缩某个文件为tar.gz
:param output_filename: 压缩包路径
:param source_dir: 待压缩原始路径
:return:
"""
errcode = -1
with tarfile.open(output_filename, "w:gz") as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir))
errcode = 0
return errcode
def get_warmup_and_linear_decay(max_steps, warmup_steps):
""" warmup linear decay function """
return lambda step: min(step / warmup_steps, 1. - (step - warmup_steps) / (max_steps - warmup_steps))
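# Worked example (illustrative): with warmup_steps=100 and max_steps=1000 the
# returned lambda gives step/100 during warmup (0.5 at step 50) and then decays
# as 1 - (step - 100)/900 afterwards (0.5 at step 550, 0.0 at step 1000).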
_work_dir = None
def get_work_path(path):
"""
get_work_path
"""
if not path or not _work_dir or path[0] in './':
return path
return os.path.join(_work_dir, path)
# Reimplementation of paddle's _import_module_from_library; it can be dropped once the corresponding paddle release is available
import logging
import sys
from paddle.fluid import core
from paddle.fluid.framework import OpProtoHolder
import threading
import atexit
import textwrap
from importlib import machinery
logger = logging.getLogger("utils.util_helper")
logger.setLevel(logging.INFO)
formatter = logging.Formatter(fmt='%(asctime)s - %(levelname)s - %(message)s')
ch = logging.StreamHandler()
ch.setFormatter(formatter)
logger.addHandler(ch)
def log_v(info, verbose=True):
"""
Print log information on stdout.
"""
if verbose:
logger.info(info)
OS_NAME = sys.platform
IS_WINDOWS = OS_NAME.startswith('win')
def load_op_meta_info_and_register_op(lib_filename):
"""
load of meta info and register op
"""
core.load_op_meta_info_and_register_op(lib_filename)
return OpProtoHolder.instance().update_op_proto()
def import_module_from_library_wenxin(module_name, build_directory, verbose=False):
"""
Load shared library and import it as callable python module.
"""
if IS_WINDOWS:
dynamic_suffix = '.pyd'
elif OS_NAME.startswith('darwin'):
dynamic_suffix = '.dylib'
else:
dynamic_suffix = '.so'
ext_path = os.path.join(build_directory, module_name + dynamic_suffix)
if not os.path.exists(ext_path):
raise ValueError("Extension path: {} does not exist.".format(
ext_path))
# load custom op_info and kernels from .so shared library
log_v('loading shared library from: {}'.format(ext_path), verbose)
op_names = load_op_meta_info_and_register_op(ext_path)
# generate Python api in ext_path
return _generate_python_module(module_name, op_names, build_directory,
verbose)
DEFAULT_OP_ATTR_NAMES = [
core.op_proto_and_checker_maker.kOpRoleAttrName(),
core.op_proto_and_checker_maker.kOpRoleVarAttrName(),
core.op_proto_and_checker_maker.kOpNameScopeAttrName(),
core.op_proto_and_checker_maker.kOpCreationCallstackAttrName(),
core.op_proto_and_checker_maker.kOpDeviceAttrName(),
core.op_proto_and_checker_maker.kOpWithQuantAttrName()
]
def parse_op_info(op_name):
"""
    Parse input names and output details from a registered custom op
from OpInfoMap.
"""
if op_name not in OpProtoHolder.instance().op_proto_map:
raise ValueError(
"Please load {} shared library file firstly by "
"`paddle.utils.cpp_extension.load_op_meta_info_and_register_op(...)`".
format(op_name))
op_proto = OpProtoHolder.instance().get_op_proto(op_name)
in_names = [x.name for x in op_proto.inputs]
out_names = [x.name for x in op_proto.outputs]
attr_names = [
x.name for x in op_proto.attrs if x.name not in DEFAULT_OP_ATTR_NAMES
]
return in_names, out_names, attr_names
def _get_api_inputs_str(op_name):
"""
Returns string of api parameters and inputs dict.
"""
in_names, out_names, attr_names = parse_op_info(op_name)
# e.g: x, y, z
param_names = in_names + attr_names
# NOTE(chenweihang): we add suffix `@VECTOR` for std::vector<Tensor> input,
# but the string contains `@` cannot used as argument name, so we split
# input name by `@`, and only use first substr as argument
params_str = ','.join([p.split("@")[0].lower() for p in param_names])
# e.g: {'X': x, 'Y': y, 'Z': z}
ins_str = "{%s}" % ','.join([
"'{}' : {}".format(in_name, in_name.split("@")[0].lower())
for in_name in in_names
])
# e.g: {'num': n}
attrs_str = "{%s}" % ",".join([
"'{}' : {}".format(attr_name, attr_name.split("@")[0].lower())
for attr_name in attr_names
])
# e.g: ['Out', 'Index']
outs_str = "[%s]" % ','.join(["'{}'".format(name) for name in out_names])
return [params_str, ins_str, attrs_str, outs_str]
def _custom_api_content(op_name):
(params_str, ins_str, attrs_str, outs_str) = _get_api_inputs_str(op_name)
API_TEMPLATE = textwrap.dedent("""
from paddle.fluid.core import VarBase
from paddle.fluid.framework import in_dygraph_mode, _dygraph_tracer
from paddle.fluid.layer_helper import LayerHelper
def {op_name}({inputs}):
# prepare inputs and outputs
ins = {ins}
attrs = {attrs}
outs = {{}}
out_names = {out_names}
# The output variable's dtype use default value 'float32',
# and the actual dtype of output variable will be inferred in runtime.
if in_dygraph_mode():
for out_name in out_names:
outs[out_name] = VarBase()
_dygraph_tracer().trace_op(type="{op_name}", inputs=ins, outputs=outs, attrs=attrs)
else:
helper = LayerHelper("{op_name}", **locals())
for out_name in out_names:
outs[out_name] = helper.create_variable(dtype='float32')
helper.append_op(type="{op_name}", inputs=ins, outputs=outs, attrs=attrs)
res = [outs[out_name] for out_name in out_names]
return res[0] if len(res)==1 else res
""").lstrip()
# generate python api file
api_content = API_TEMPLATE.format(
op_name=op_name,
inputs=params_str,
ins=ins_str,
attrs=attrs_str,
out_names=outs_str)
return api_content
def _load_module_from_file(api_file_path, module_name, verbose=False):
"""
Load module from python file.
"""
if not os.path.exists(api_file_path):
raise ValueError("File : {} does not exist.".format(
api_file_path))
# Unique readable module name to place custom api.
log_v('import module from file: {}'.format(api_file_path), verbose)
ext_name = "_paddle_cpp_extension_" + module_name
# load module with RWLock
loader = machinery.SourceFileLoader(ext_name, api_file_path)
module = loader.load_module()
return module
def _generate_python_module(module_name,
op_names,
build_directory,
verbose=False):
"""
    Automatically generate a python file that can be imported or loaded as a module
"""
def remove_if_exit(filepath):
"""
        remove the file if it exists
"""
if os.path.exists(filepath):
os.remove(filepath)
# NOTE: Use unique id as suffix to avoid write same file at same time in
# both multi-thread and multi-process.
thread_id = str(threading.currentThread().ident)
api_file = os.path.join(build_directory,
module_name + '_' + thread_id + '.py')
log_v("generate api file: {}".format(api_file), verbose)
# delete the temp file before exit python process
atexit.register(lambda: remove_if_exit(api_file))
# write into .py file with RWLock
api_content = [_custom_api_content(op_name) for op_name in op_names]
with open(api_file, 'w') as f:
f.write('\n\n'.join(api_content))
# load module
custom_module = _load_module_from_file(api_file, module_name, verbose)
return custom_module
| 34.961017
| 105
| 0.61308
|
1251561b62d6537a68fef3caa1aae81e70162fba
| 8,766
|
py
|
Python
|
maha/parsers/functions/parse_fn.py
|
TRoboto/Maha
|
f229adbb1dcccb6bf8f84852723d24f97d511b24
|
[
"BSD-3-Clause"
] | 152
|
2021-09-18T08:18:47.000Z
|
2022-03-14T13:23:17.000Z
|
maha/parsers/functions/parse_fn.py
|
TRoboto/Maha
|
f229adbb1dcccb6bf8f84852723d24f97d511b24
|
[
"BSD-3-Clause"
] | 65
|
2021-09-20T06:00:41.000Z
|
2022-03-20T22:44:39.000Z
|
maha/parsers/functions/parse_fn.py
|
TRoboto/Maha
|
f229adbb1dcccb6bf8f84852723d24f97d511b24
|
[
"BSD-3-Clause"
] | 10
|
2021-09-18T11:56:57.000Z
|
2021-11-20T09:05:16.000Z
|
"""Functions that extracts values from text"""
from __future__ import annotations
__all__ = ["parse", "parse_expression"]
from maha.constants import (
ALL_HARAKAT,
ARABIC,
ARABIC_LETTERS,
ARABIC_LIGATURES,
ARABIC_NUMBERS,
ARABIC_PUNCTUATIONS,
EMPTY,
ENGLISH,
ENGLISH_CAPITAL_LETTERS,
ENGLISH_LETTERS,
ENGLISH_NUMBERS,
ENGLISH_PUNCTUATIONS,
ENGLISH_SMALL_LETTERS,
HARAKAT,
NUMBERS,
PUNCTUATIONS,
SPACE,
TATWEEL,
)
from maha.expressions import (
EXPRESSION_ARABIC_HASHTAGS,
EXPRESSION_ARABIC_MENTIONS,
EXPRESSION_EMAILS,
EXPRESSION_EMOJIS,
EXPRESSION_ENGLISH_HASHTAGS,
EXPRESSION_ENGLISH_MENTIONS,
EXPRESSION_HASHTAGS,
EXPRESSION_LINKS,
EXPRESSION_MENTIONS,
)
from maha.parsers.templates import Dimension, DimensionType, TextExpression
from maha.rexy import Expression, ExpressionGroup
def parse(
text: str,
arabic: bool = False,
english: bool = False,
arabic_letters: bool = False,
english_letters: bool = False,
english_small_letters: bool = False,
english_capital_letters: bool = False,
numbers: bool = False,
harakat: bool = False,
all_harakat: bool = False,
tatweel: bool = False,
punctuations: bool = False,
arabic_numbers: bool = False,
english_numbers: bool = False,
arabic_punctuations: bool = False,
english_punctuations: bool = False,
arabic_ligatures: bool = False,
arabic_hashtags: bool = False,
arabic_mentions: bool = False,
emails: bool = False,
english_hashtags: bool = False,
english_mentions: bool = False,
hashtags: bool = False,
links: bool = False,
mentions: bool = False,
emojis: bool = False,
custom_expressions: ExpressionGroup | Expression | None = None,
include_space=False,
) -> list[Dimension]:
"""Extracts certain characters/patterns from the given text.
To add a new parameter, make sure that its name is the same as the corresponding
constant. For the patterns, only remove the prefix ``EXPRESSION_`` from the parameter name
.. admonition:: TO DO
Add the ability to combine all expressions before parsing.
Parameters
----------
text : str
Text to be processed
arabic : bool, optional
Extract :data:`~.ARABIC` characters, by default False
english : bool, optional
Extract :data:`~.ENGLISH` characters, by default False
arabic_letters : bool, optional
Extract :data:`~.ARABIC_LETTERS` characters, by default False
english_letters : bool, optional
Extract :data:`~.ENGLISH_LETTERS` characters, by default False
english_small_letters : bool, optional
Extract :data:`~.ENGLISH_SMALL_LETTERS` characters, by default False
english_capital_letters : bool, optional
Extract :data:`~.ENGLISH_CAPITAL_LETTERS` characters, by default False
numbers : bool, optional
Extract :data:`~.NUMBERS` characters, by default False
harakat : bool, optional
Extract :data:`~.HARAKAT` characters, by default False
all_harakat : bool, optional
Extract :data:`~.ALL_HARAKAT` characters, by default False
tatweel : bool, optional
Extract :data:`~.TATWEEL` character, by default False
punctuations : bool, optional
Extract :data:`~.PUNCTUATIONS` characters, by default False
arabic_numbers : bool, optional
Extract :data:`~.ARABIC_NUMBERS` characters, by default False
english_numbers : bool, optional
Extract :data:`~.ENGLISH_NUMBERS` characters, by default False
arabic_punctuations : bool, optional
Extract :data:`~.ARABIC_PUNCTUATIONS` characters, by default False
english_punctuations : bool, optional
Extract :data:`~.ENGLISH_PUNCTUATIONS` characters, by default False
arabic_ligatures : bool, optional
Extract :data:`~.ARABIC_LIGATURES` words, by default False
arabic_hashtags : bool, optional
Extract Arabic hashtags using the expression :data:`~.EXPRESSION_ARABIC_HASHTAGS`,
by default False
arabic_mentions : bool, optional
Extract Arabic mentions using the expression :data:`~.EXPRESSION_ARABIC_MENTIONS`,
by default False
    emails : bool, optional
        Extract emails using the expression :data:`~.EXPRESSION_EMAILS`,
        by default False
    english_hashtags : bool, optional
        Extract English hashtags using the expression :data:`~.EXPRESSION_ENGLISH_HASHTAGS`,
        by default False
    english_mentions : bool, optional
        Extract English mentions using the expression :data:`~.EXPRESSION_ENGLISH_MENTIONS`,
        by default False
    hashtags : bool, optional
        Extract hashtags using the expression :data:`~.EXPRESSION_HASHTAGS`,
        by default False
    links : bool, optional
        Extract links using the expression :data:`~.EXPRESSION_LINKS`,
        by default False
    mentions : bool, optional
        Extract mentions using the expression :data:`~.EXPRESSION_MENTIONS`,
        by default False
emojis : bool, optional
Extract emojis using the expression :data:`~.EXPRESSION_EMOJIS`,
by default False
custom_expressions : Union[:class:`~.ExpressionGroup`, :class:`~.Expression`],
optional. Include any other string(s), by default None
include_space : bool, optional
Include the space expression :data:`~.EXPRESSION_SPACE` with all characters,
by default False
Returns
-------
List[:class:`~.Dimension`]
List of dimensions extracted from the text
Raises
------
ValueError
If no argument is set to True
"""
if not text:
return []
# current function arguments
current_arguments = locals()
constants = globals()
output = []
any_argument_set = False
# Since each argument has the same name as the corresponding constant
# (But, expressions should be prefixed with "EXPRESSION_" to match the actual expression.)
# Looping through all arguments and appending constants that correspond to the
# True arguments can work
# TODO: Maybe find a good pythonic way to do this
for arg, value in current_arguments.items():
const = constants.get(arg.upper())
if const and value is True:
any_argument_set = True
if include_space:
pattern = f"(?:[{''.join(const)}](?:\\s+)?)+"
else:
pattern = f"[{''.join(const)}]+"
text_exp = TextExpression(pattern)
parsed = parse_expression(text, text_exp, DimensionType[arg.upper()])
output.extend(parsed)
continue
# check for expression
expression: Expression | None = constants.get("EXPRESSION_" + arg.upper())
if expression and value is True:
any_argument_set = True
text_exp = TextExpression(str(expression))
parsed = parse_expression(text, text_exp, DimensionType[arg.upper()])
output.extend(parsed)
if custom_expressions:
any_argument_set = True
output.extend(parse_expression(text, custom_expressions))
if not any_argument_set:
raise ValueError("At least one argument should be True")
return output
def parse_expression(
text: str,
expressions: ExpressionGroup | Expression,
dimension_type: DimensionType = DimensionType.GENERAL,
) -> list[Dimension]:
"""
Extract matched strings in the given ``text`` using the input ``patterns``
Parameters
----------
text : str
Text to check
expressions : Union[:class:`~.ExpressionGroup`, :class:`~.Expression`]
Expression(s) to use
dimension_type : DimensionType
Dimension type of the input ``expressions``,
by default :attr:`.DimensionType.GENERAL`
Returns
-------
List[:class:`~.Dimension`]
List of extracted dimensions
Raises
------
ValueError
If ``expressions`` are invalid
"""
if (
not expressions
or (isinstance(expressions, Expression) and not expressions.pattern)
or (isinstance(expressions, ExpressionGroup) and not expressions.expressions)
):
raise ValueError("'expressions' cannot be empty.")
# convert to ExpressionGroup
if isinstance(expressions, Expression):
expressions = ExpressionGroup(expressions)
output = []
for result in expressions.parse(text):
start = result.start
end = result.end
value = result.value
body = text[start:end]
output.append(
Dimension(result.expression, body, value, start, end, dimension_type)
)
return output
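# Minimal usage sketch: the sample sentence and address below are made up, and the
# snippet assumes the installed maha package resolves the absolute imports above.
if __name__ == "__main__":
    sample = "contact me at someone@example.com"
    for dimension in parse(sample, emails=True, english_letters=True):
        print(dimension)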
| 34.108949
| 94
| 0.670888
|
837528af63d281db62376d0ae9de14c493f773ed
| 1,786
|
py
|
Python
|
pkg_classes/led8x8prime.py
|
parttimehacker/clock
|
53ba721179951945058037b0c4bd8588c2bb1f95
|
[
"MIT"
] | null | null | null |
pkg_classes/led8x8prime.py
|
parttimehacker/clock
|
53ba721179951945058037b0c4bd8588c2bb1f95
|
[
"MIT"
] | 1
|
2019-02-24T17:29:11.000Z
|
2019-02-24T17:29:11.000Z
|
pkg_classes/led8x8prime.py
|
parttimehacker/clock
|
53ba721179951945058037b0c4bd8588c2bb1f95
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
""" Test Bed for Diyhas System Status class """
import time
BRIGHTNESS = 10
UPDATE_RATE_SECONDS = 0.2
PRIMES = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67,
71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149,
151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229,
233, 239, 241, 251]
class Led8x8Prime:
""" Prime numbers less than 256 display on an 8x8 matrix """
def __init__(self, matrix8x8):
""" create the prime object """
self.matrix = matrix8x8
self.index = 0
self.row = 0
self.iterations = 0
def reset(self,):
""" initialize and start the prime number display """
self.index = 0
self.row = 0
self.iterations = 0
self.matrix.set_brightness(BRIGHTNESS)
def display(self,):
""" display primes up to the max for 8 bits """
time.sleep(UPDATE_RATE_SECONDS)
self.matrix.clear()
# cycle through the primes
self.index += 1
if self.index >= len(PRIMES):
self.index = 0
self.row = 0
number = PRIMES[self.index]
#display 8 bit prime per row
row = self.row
self.row += 1
if self.row >= 8:
self.row = 0
for xpixel in range(0, 8):
bit = number & (1 << xpixel)
if self.iterations == 3:
self.iterations = 1
else:
self.iterations += 1
if bit == 0:
self.matrix.set_pixel(row, xpixel, 0)
else:
self.matrix.set_pixel(row, xpixel, self.iterations)
self.matrix.write_display()
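# Illustrative sketch: Led8x8Prime only relies on the four matrix calls used above
# (set_brightness, clear, set_pixel, write_display). A hypothetical stand-in for
# exercising the class without LED hardware could look like this; it is not used below.
class _FakeMatrix:
    """Minimal stand-in implementing the interface Led8x8Prime expects."""
    def set_brightness(self, level):
        pass
    def clear(self):
        pass
    def set_pixel(self, row, column, value):
        pass
    def write_display(self):
        pass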
if __name__ == '__main__':
exit()
| 29.278689
| 84
| 0.530795
|
905346c6480f4227e3ff33bb73a1ea2d5a8ff4ff
| 7,039
|
py
|
Python
|
service/app/modules/ModelManager.py
|
leifan89/video-analytics-serving
|
1327d650927fd78e55b7cbcb6d7238ca2b240028
|
[
"BSD-3-Clause"
] | null | null | null |
service/app/modules/ModelManager.py
|
leifan89/video-analytics-serving
|
1327d650927fd78e55b7cbcb6d7238ca2b240028
|
[
"BSD-3-Clause"
] | null | null | null |
service/app/modules/ModelManager.py
|
leifan89/video-analytics-serving
|
1327d650927fd78e55b7cbcb6d7238ca2b240028
|
[
"BSD-3-Clause"
] | null | null | null |
'''
* Copyright (C) 2019 Intel Corporation.
*
* SPDX-License-Identifier: BSD-3-Clause
'''
import os
import sys
import json
import fnmatch
import string
import common.settings # pylint: disable=import-error
from common.utils import logging # pylint: disable=import-error
logger = logging.get_logger('ModelManager', is_static=True)
from collections.abc import MutableMapping
class ModelsDict(MutableMapping):
def __init__(self, model_name,model_version,*args, **kw):
self._model_name = model_name
self._model_version = model_version
self._dict = dict(*args, **kw)
def __setitem__(self, key, value):
self._dict[key] = value
def __delitem__(self, key):
del self._dict[key]
def __getitem__(self, key):
if (key=="network"):
if ('default' in self._dict["networks"]):
return self._dict["networks"]["default"]
else:
return "{{models[{}][{}][VA_DEVICE_DEFAULT][network]}}".format(self._model_name,self._model_version)
if (key in self._dict["networks"]):
return self._dict["networks"][key]
return self._dict.get(key, None)
def __iter__(self):
return iter(self._dict)
def __len__(self):
return len(self._dict)
class ModelManager:
models = None
network_preference = {'CPU':"FP32",
'HDDL':"FP16",
'GPU':"FP16",
'VPU':"FP16"}
@staticmethod
def _get_model_proc(path):
candidates=fnmatch.filter(os.listdir(path), "*.json")
if (len(candidates)>1):
raise Exception("Multiple model proc files found in %s" %(path,))
elif(len(candidates)==1):
return os.path.abspath(os.path.join(path,candidates[0]))
return None
@staticmethod
def _get_model_network(path):
candidates=fnmatch.filter(os.listdir(path), "*.xml")
if (len(candidates)>1):
raise Exception("Multiple networks found in %s" %(path,))
elif(len(candidates)==1):
return os.path.abspath(os.path.join(path,candidates[0]))
return None
@staticmethod
def _get_model_networks(path):
networks = {}
default = ModelManager._get_model_network(path)
if (default):
networks["default"] = default
for network_type in os.listdir(path):
network_type_path = os.path.join(path,network_type)
if (os.path.isdir(network_type_path)):
network = ModelManager._get_model_network(network_type_path)
if (network):
networks[network_type] = {'network':network}
return networks
@staticmethod
def get_network(model, network):
preferred_model=model.replace("VA_DEVICE_DEFAULT",network)
try:
preferred_model=string.Formatter().vformat(preferred_model, [], {'models':ModelManager.models})
return preferred_model
except Exception:
pass
return None
@staticmethod
def get_default_network_for_device(device,model):
if "VA_DEVICE_DEFAULT" in model:
for preference in ModelManager.network_preference[device]:
ret = ModelManager.get_network(model,preference)
if ret:
return ret
logger.info("Device preferred network {net} not found".format(net=preference))
model=model.replace("[VA_DEVICE_DEFAULT]","")
logger.error("Could not resolve any preferred network {net} for model {model}".format(net=ModelManager.network_preference[device],model=model))
return model
@staticmethod
def load_config(model_dir,network_preference):
logger.info("Loading Models from Path {path}".format(path=os.path.abspath(model_dir)))
if os.path.islink(model_dir):
logger.warning("Models directory is symbolic link")
if os.path.ismount(model_dir):
logger.warning("Models directory is mount point")
models = {}
ModelManager.network_preference.update(network_preference)
for key in ModelManager.network_preference:
ModelManager.network_preference[key] = ModelManager.network_preference[key].split(',')
for model_name in os.listdir(model_dir):
try:
model_path = os.path.join(model_dir,model_name)
for version in os.listdir(model_path):
version_path = os.path.join(model_path,version)
if (os.path.isdir(version_path)):
version = int(version)
proc = ModelManager._get_model_proc(version_path)
networks = ModelManager._get_model_networks(version_path)
if (proc) and (networks):
for key in networks:
networks[key].update({"proc":proc,
"version":version,
"type":"IntelDLDT",
"description":model_name})
models[model_name] = {version:ModelsDict(model_name,
version,
{"networks":networks,
"proc":proc,
"version":version,
"type":"IntelDLDT",
"description":model_name
})
}
except Exception as error:
logger.error("Error Loading Model {model_name} from: {model_dir}: {err}".format(err=error,model_name=model_name,model_dir=model_dir))
ModelManager.models = models
logger.info("Completed Loading Models")
@staticmethod
def get_model_parameters(name, version):
if name not in ModelManager.models or version not in ModelManager.models[name] :
return None
params_obj = {
"name": name,
"version": version
}
if "type" in ModelManager.models[name][version]:
params_obj["type"] = ModelManager.models[name][version]["type"]
if "description" in ModelManager.models[name][version]:
params_obj["description"] = ModelManager.models[name][version]["description"]
return params_obj
@staticmethod
def get_loaded_models():
results = []
if ModelManager.models is not None:
for model in ModelManager.models:
for version in ModelManager.models[model].keys():
result = ModelManager.get_model_parameters(model, version)
if result :
results.append(result)
return results
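# Illustrative usage sketch (the directory and preferences below are made up):
#   ModelManager.load_config("/models", {"GPU": "FP16,FP32"})
#   for entry in ModelManager.get_loaded_models():
#       print(entry["name"], entry["version"], entry.get("type"))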
| 41.405882
| 155
| 0.565137
|
7c8b9b048f01714609561cf7ef409739319e9ba2
| 5,303
|
py
|
Python
|
basetestcase/base_FormTestCase.py
|
Spleeding1/django-basetestcase
|
341bb0921c9eb3699f44ca59b6d0b1dbfa32bd00
|
[
"MIT"
] | 1
|
2019-07-30T12:55:47.000Z
|
2019-07-30T12:55:47.000Z
|
basetestcase/base_FormTestCase.py
|
Spleeding1/django-basetestcase
|
341bb0921c9eb3699f44ca59b6d0b1dbfa32bd00
|
[
"MIT"
] | null | null | null |
basetestcase/base_FormTestCase.py
|
Spleeding1/django-basetestcase
|
341bb0921c9eb3699f44ca59b6d0b1dbfa32bd00
|
[
"MIT"
] | null | null | null |
from .base_UtilityTestCase import UtilityTestCase
class FormTestCase(UtilityTestCase):
def form_field_test(self, field, error_messages={}, help_text='', initial=None, label='', required=True, widget_attrs={}):
form = self.form()
try:
form.fields[field]
except KeyError:
raise Exception(f'\n Form does not have field\n\n {field}')
completed_attrs = []
for attr, expected in widget_attrs.items():
try:
actual = form.fields[field].widget.attrs[attr]
except KeyError:
raise Exception(
f'\n {attr} for form field {field} has not been set.'
)
self.assertEquals(expected, actual, msg=f'\n {field}: {attr}')
completed_attrs.append(attr)
for attr, actual in form.fields[field].widget.attrs.items():
if attr not in completed_attrs:
raise Exception(f'\n {field}:{attr} is set and should not be.')
if required is True and 'required' not in error_messages:
error_messages['required'] = 'This field is required.'
        elif len(error_messages) != 0:
for error, error_message in error_messages.items():
actual_error_message = form.fields[field].error_messages[error]
self.assertEquals(
error_message,
actual_error_message,
msg=f'\n {field}: error_message[{error}]'
)
actual_help_text = form.fields[field].help_text
self.assertEquals(
help_text,
actual_help_text,
msg=f'\n {field}: help_text'
)
actual_initial = form.fields[field].initial
self.assertEquals(
initial,
actual_initial,
msg=f'\n {field}: initial value'
)
actual_label = form.fields[field].label
self.assertEquals(
label,
actual_label,
msg=f'\n{field}: label'
)
actual_required = form.fields[field].required
self.assertEquals(
required,
actual_required,
msg=f'\n {field}: required'
)
def form_required_field_error_test(self, data={}, error_messages={}):
required_fields = []
Form = self.form()
for field in Form.fields:
if Form.fields[field].required is True:
required_fields.append(field)
for field, value in data.items():
if field in required_fields:
_data = {**data}
_data[field] = ''
form = self.form(data={**_data})
self.assertFalse(
form.is_valid(),
msg=f'\n Form should not be valid.\n {field} should be blank.\n data={_data}'
)
error = 'This field is required.'
if field in error_messages:
error = error_messages[field]
self.assertEqual(
form.errors[field],
[error],
msg=f'{field}'
)
def formset_error_test(self, formset, form_index=None, field=None, message=''):
formset.is_valid()
if form_index is None and field is None:
self.assertEqual(formset.non_form_errors(), [message])
        elif field is None:
self.assertEqual(formset[form_index].non_field_errors(), [message])
else:
self.assertEqual(formset[form_index].errors[field], [message])
def formset_test(self, baseformset=None, can_delete=False, extra=1, field_data={}, form=None, formset=None, initial=0, max_num=None, min_num=None, model=None, prefix='form', total=1, validate_max=False, validate_min=False):
form_data, test_model_instances = self.formset_filler(
field_data=field_data,
initial=initial,
model=model,
prefix=prefix,
total=total
)
test_formset = formset(form_data, prefix=prefix)
self.assertEqual(test_formset.extra, extra)
if baseformset is not None:
self.assertTrue(issubclass(formset, baseformset))
if form is not None:
self.assertTrue(issubclass(test_formset.form, form))
if max_num is not None:
self.assertEqual(test_formset.max_num, max_num)
if min_num is not None:
self.assertEqual(test_formset.min_num, min_num)
self.assertEqual(test_formset.can_delete, can_delete)
self.assertEqual(test_formset.prefix, prefix)
self.assertEqual(test_formset.validate_max, validate_max)
self.assertEqual(test_formset.validate_min, validate_min)
if test_formset.is_valid():
test_formset.save()
if model is not None:
self.instances_saved_test(model, test_model_instances, total)
else:
self.fail(
                f'formset is not valid.\n{form_data}\n{test_formset.non_form_errors()}\n{test_formset.errors}'
)
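# Illustrative usage sketch (ContactForm and the field values are hypothetical;
# a test class only needs a `form` attribute pointing at the form under test):
#
#   class ContactFormTest(FormTestCase):
#       form = ContactForm
#
#       def test_name_field(self):
#           self.form_field_test(
#               'name',
#               label='Name',
#               widget_attrs={'maxlength': '100'},
#           )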
| 37.878571
| 227
| 0.555912
|
2f8fe29cbc6484db407e926dae23407273f0b664
| 711
|
py
|
Python
|
src/compas/utilities/datetime.py
|
Sam-Bouten/compas
|
011c7779ded9b69bb602568b470bb0443e336f62
|
[
"MIT"
] | null | null | null |
src/compas/utilities/datetime.py
|
Sam-Bouten/compas
|
011c7779ded9b69bb602568b470bb0443e336f62
|
[
"MIT"
] | null | null | null |
src/compas/utilities/datetime.py
|
Sam-Bouten/compas
|
011c7779ded9b69bb602568b470bb0443e336f62
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import time
import datetime
__all__ = [
'timestamp',
'now'
]
def timestamp():
"""Generate a timestamp using the current date and time.
Returns
-------
str
The timestamp.
Examples
--------
>>> type(timestamp()) == type('')
True
"""
return datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d%H%M%S')
def now():
"""Generate a timestamp using the current date and time.
Returns
-------
str
The timestamp.
Examples
--------
>>> type(now()) == type('')
True
"""
return timestamp()
| 15.12766
| 80
| 0.582278
|
f812fad2ab1bdb3320a2efd7baa74c1e3517ae01
| 861
|
py
|
Python
|
prep/dia01/parser_test.py
|
pliba/garoa2018
|
8ba5d392b5d16223f93566bd46c26546669e9668
|
[
"BSD-3-Clause"
] | 2
|
2018-11-22T14:49:38.000Z
|
2022-01-15T19:12:05.000Z
|
prep/dia01/parser_test.py
|
pliba/garoa2018
|
8ba5d392b5d16223f93566bd46c26546669e9668
|
[
"BSD-3-Clause"
] | null | null | null |
prep/dia01/parser_test.py
|
pliba/garoa2018
|
8ba5d392b5d16223f93566bd46c26546669e9668
|
[
"BSD-3-Clause"
] | null | null | null |
from pytest import mark
from parser import tokenize, parse
@mark.parametrize("source,want",[
('a', ['a']),
('abs', ['abs']),
('(now)', ['(', 'now', ')']),
('()', ['(', ')']),
('(+ 2 3)', ['(', '+', '2', '3', ')']),
('(+ 2 (* 3 5))', ['(', '+', '2', '(', '*', '3', '5', ')',')']),
])
def test_tokenize(source, want):
tokens = tokenize(source)
assert want == tokens
@mark.parametrize("tokens,want",[
(['2'], 2),
(['a'], 'a'),
(['sqrt'], 'sqrt'),
(['(', 'now', ')'], ['now']),
(['(', '+', '2', '3', ')'], ['+', 2, 3]),
(['(', '+', '2', '(', '*', '3', '5', ')',')'], ['+', 2, ['*', 3, 5]]),
])
def test_parse(tokens, want):
ast = parse(tokens)
assert want == ast
def test_parse_double_close():
ast = parse(tokenize('(+ 2 (* 4 5))'))
want = ['+', 2, ['*', 4, 5]]
assert want == ast
| 24.6
| 74
| 0.380952
|
4686335ba402d0083d7da29190b97e3ce3edc1fa
| 169
|
py
|
Python
|
allauth/socialaccount/providers/doximity/urls.py
|
mina-gaid/scp-stock-forcasting
|
38e1cd303d4728a987df117f666ce194e241ed1a
|
[
"MIT"
] | 1
|
2018-04-06T21:36:59.000Z
|
2018-04-06T21:36:59.000Z
|
allauth/socialaccount/providers/doximity/urls.py
|
mina-gaid/scp-stock-forcasting
|
38e1cd303d4728a987df117f666ce194e241ed1a
|
[
"MIT"
] | 6
|
2020-06-05T18:44:19.000Z
|
2022-01-13T00:48:56.000Z
|
allauth/socialaccount/providers/doximity/urls.py
|
mina-gaid/scp-stock-forcasting
|
38e1cd303d4728a987df117f666ce194e241ed1a
|
[
"MIT"
] | null | null | null |
from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns
from .provider import DoximityProvider
urlpatterns = default_urlpatterns(DoximityProvider)
| 28.166667
| 75
| 0.87574
|
6a61bcc265cbd0c95e415522643901d600455272
| 15,411
|
py
|
Python
|
smartcat/api.py
|
yakninja/evan-bot
|
8475a79a6369c78478eaca71cdc0e548f5853794
|
[
"BSD-2-Clause"
] | 1
|
2020-11-23T02:54:55.000Z
|
2020-11-23T02:54:55.000Z
|
smartcat/api.py
|
yakninja/evan-bot
|
8475a79a6369c78478eaca71cdc0e548f5853794
|
[
"BSD-2-Clause"
] | null | null | null |
smartcat/api.py
|
yakninja/evan-bot
|
8475a79a6369c78478eaca71cdc0e548f5853794
|
[
"BSD-2-Clause"
] | 1
|
2021-11-10T19:52:23.000Z
|
2021-11-10T19:52:23.000Z
|
# -*- coding: utf-8 -*-
"""
smartcat.api
~~~~~~~~~~~~
This module contains classes that make http requests to SmartCAT
`API Documentation <https://smartcat.ai/api/methods/>`_
Original project at https://github.com/gilyaev/smartcat-python-sdk
Modified by Andrey Kravchuk https://github.com/yakninja (added account part, some methods etc)
"""
import json
from abc import ABCMeta
import requests
class SmartcatException(Exception):
def __init__(self, message, code=0):
super(SmartcatException, self).__init__(message)
self.code = code
self.message = message
class SmartCAT(object):
"""SmartCAT API
Provides functionality for SmartCAT resource management:
- project
- document
Manage Project Resource::
>>> from smartcat.api import SmartCAT
>>> api = SmartCAT('username', 'password', SmartCAT.SERVER_EUROPE)
>>> project_resource = api.project
<smartcat.api.Project>
>>> project_model = {
"name": "Sample Project",
"sourceLanguage": "en",
"targetLanguages": ["ru"],
"assignToVendor": False
}
>>> res = project_resource.create(data=project_model)
<Response [200]>
Manage Document Resource::
>>> from smartcat.api import SmartCAT
>>> api = SmartCAT('username', 'password', SmartCAT.SERVER_EUROPE)
>>> document_resource = api.document
<smartcat.api.Document>
>>> res = document_resource.request_export(document_ids=['project1_doc1', 'project1_doc2', 'project2_doc1'])
<Response [200]>
>>> res = document_resource.request_export(document_ids='project1_doc1')
<Response [200]>
"""
SERVER_USA = 'https://us.smartcat.ai'
SERVER_EUROPE = 'https://smartcat.ai'
def __init__(self, username, password, server_url=SERVER_EUROPE):
"""
Constructor
:param username: SmartCAT API username.
:param password: SmartCAT API password.
:param server_url (optional): The API server: SmartCAT.SERVER_EUROPE or SmartCAT.SERVER_USA
"""
self.username = username
self.password = password
self.server_url = server_url
#: :class:`Project <Project>`.
self._project = None
self._document = None
self._account = None
@property
def project(self):
"""Returns instance of class:`Project <smartcat.api.Project>`.
:return: :class:`Project <smartcat.api.Project>` object
:rtype: smartcat.api.Project
"""
if self._project is not None:
return self._project
self._project = self._create_api_resource('Project')
return self._project
@property
def document(self):
"""Returns instance of `Document <smartcat.api.Document>`
:return: :class:`Document <smartcat.api.Document>` object
:rtype: smartcat.api.Document
"""
if self._document is not None:
return self._document
self._document = self._create_api_resource('Document')
return self._document
@property
def account(self):
"""Returns instance of `Account <smartcat.api.Account>`
:return: :class:`Account <smartcat.api.Account>` object
:rtype: smartcat.api.Account
"""
if self._account is not None:
return self._account
self._account = self._create_api_resource('Account')
return self._account
def _create_api_resource(self, resource):
"""Creates and returns API resource
:return: :class:`BaseResource <BaseResource>` object
:rtype: smartcat.BaseResource
"""
return globals()[resource](self.username, self.password, self.server_url)
class BaseResource(object):
__metaclass__ = ABCMeta
def __init__(self, username, password, server):
self.session = requests.Session()
self.session.auth = (username, password)
self.session.headers.update({'Accept': 'application/json'})
self.server = server
def send_get_request(self, path, **kwargs):
url = self.server + path
return self.session.get(url, **kwargs)
def send_options_request(self, path, **kwargs):
url = self.server + path
return self.session.options(url, **kwargs)
def send_head_request(self, path, **kwargs):
url = self.server + path
        return self.session.head(url, **kwargs)
def send_post_request(self, path, data=None, json=None, **kwargs):
url = self.server + path
return self.session.post(url, data=data, json=json, **kwargs)
def send_put_request(self, path, data=None, **kwargs):
url = self.server + path
return self.session.put(url, data=data, **kwargs)
def send_patch_request(self, path, data=None, **kwargs):
url = self.server + path
return self.session.patch(url, data=data, **kwargs)
def send_delete_request(self, path, **kwargs):
url = self.server + path
return self.session.delete(url, **kwargs)
class Project(BaseResource):
def create(self, data, files=None):
# type: (dict) -> requests.Response
"""Create a new project
:param data: The project information.
:type data: dict
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``)
for multipart encoding upload.
``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
defining the content type of the given file and ``custom_headers`` a dict-like object containing additional
headers to add for the file
"""
if files is None:
files = {}
files["model"] = (None, json.dumps(data), 'application/json')
return self.send_post_request(
'/api/integration/v1/project/create',
files=files)
def update(self, id, data):
"""Update project by id
:param id: The project identifier.
:param data: The project information.
:type data: dict
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return self.send_put_request(
'/api/integration/v1/project/%s' % id,
json=data)
def delete(self, id):
"""Delete project
:param id: The project identifier.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return self.send_delete_request('/api/integration/v1/project/%s' % id)
def cancel(self, id):
"""Cancel the project
:param id: The project identifier.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return self.send_post_request(
'/api/integration/v1/project/cancel',
params={'projectId': id})
def restore(self, id):
"""Restore the project
:param id: The project identifier.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return self.send_post_request(
'/api/integration/v1/project/restore',
params={'projectId': id})
def get(self, id):
"""Get project
:param id: The project identifier.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return self.send_get_request('/api/integration/v1/project/%s' % id)
def completed_work_statistics(self, id):
"""Receiving statistics for the completed parts of the project.
:param id: The project identifier.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return self.send_get_request('/api/integration/v1/project/%s/completedWorkStatistics' % id)
def get_all(self):
"""Get document list.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return self.send_get_request('/api/integration/v1/project/list')
def attach_document(self, id, files):
"""Adds document to project.
:param id: The project identifier.
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``)
for multipart encoding upload.
``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
defining the content type of the given file and ``custom_headers`` a dict-like object containing additional
headers to add for the file
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
params = {'projectId': id}
return self.send_post_request('/api/integration/v1/project/document', files=files, params=params)
def add_target_lang(self, id, lang):
"""Add a new target language to the project
:param id: The project identifier.
:param lang: Target language code.
:return: :class:`Response <Response>` object
:rtype:
"""
return self.send_post_request(
'/api/integration/v1/project/language',
params={'projectId': id, 'targetLanguage': lang})
def get_document_by_name(self, project_id, document_name):
"""Return document dict by name or id
:param project_id: The project identifier.
:param document_name: Document name or id.
:return dict: If no document with the name was found, return None
"""
response = self.get(project_id)
if response.status_code != 200:
raise SmartcatException(code=response.status_code, message='Invalid response code')
project_data = json.loads(response.content.decode('utf-8'))
if not project_data:
raise SmartcatException(message='Invalid response')
name = document_name.lower()
for d in project_data['documents']:
if d['id'] == name or d['name'].lower() == name:
return d
return None
class Document(BaseResource):
def update(self, document_id, files):
"""Updates document
:param document_id: The document identifier.
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``)
for multipart encoding upload.
``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
defining the content type of the given file and ``custom_headers`` a dict-like object containing additional
headers to add for the file
:return: :class:`Response <Response>` object
:rtype: requests.Response
todo:: implement updateDocumentModel
"""
return self.send_put_request(
'/api/integration/v1/document/update',
files=files,
params={'documentId': document_id})
def rename(self, id, name):
"""Renames document
:param id: The document identifier.
:param name: New name.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return self.send_put_request(
'/api/integration/v1/document/rename',
params={'documentId': id, 'name': name})
def get_translation_status(self, id):
"""Receive the status of adding document translation.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return self.send_get_request(
'/api/integration/v1/document/translate/status',
params={'documentId': id})
def translate(self, id, files):
"""Translate the selected document using the uploaded translation file.
note::Available only for updatable file formats (in actual practice,
these currently include resource files with unique resource IDs)
This assigns a task to be processed; the translation
job may not be finished at the time the request is completed.
:param id: The document identifier.
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``)
for multipart encoding upload.
``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
defining the content type of the given file and ``custom_headers`` a dict-like object containing additional
headers to add for the file
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return self.send_put_request(
'/api/integration/v1/document/translate',
files=files,
params={'documentId': id})
def request_export(self, document_ids, target_type='target'):
"""Sends task to export translations
:param document_ids: The document identifier string or list of the identifier.
:param target_type: (optional): The translation document type: xliff or target.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
if isinstance(document_ids, str):
document_ids = [document_ids]
params = {
'documentIds': '\n'.join(document_ids),
'type': target_type
}
return self.send_post_request('/api/integration/v1/document/export', params=params)
def download_export_result(self, task_id):
"""Download the results of export
:param task_id: The export task identifier
"""
return self.send_get_request('/api/integration/v1/document/export/%s' % task_id, stream=True)
def assign(self, document_id, stage_number, executive_user_id):
params = {
'documentId': document_id,
'stageNumber': stage_number,
}
data = {
"executives": [
{
"id": executive_user_id,
"wordsCount": 0
}
],
"minWordsCountForExecutive": 0,
"assignmentMode": "distributeAmongAll"
}
return self.send_post_request('/api/integration/v1/document/assign', params=params, json=data)
def unassign(self, document_id, stage_number, executive_user_id):
params = {
'documentId': document_id,
'stageNumber': stage_number,
}
return self.send_post_request('/api/integration/v1/document/unassign', params=params, json=executive_user_id)
class Account(BaseResource):
def search_my_team(self, params):
return self.send_post_request('/api/integration/v1/account/searchMyTeam', json=params)
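# Illustrative export flow (credentials and ids are placeholders; the export is
# asynchronous, so the task returned by request_export is downloaded in a
# second step; the 'id' field name in the response is an assumption):
#
#   api = SmartCAT('username', 'password', SmartCAT.SERVER_EUROPE)
#   response = api.document.request_export(document_ids='project1_doc1')
#   task_id = response.json()['id']
#   result = api.document.download_export_result(task_id)
#   with open('export.zip', 'wb') as f:
#       for chunk in result.iter_content(chunk_size=8192):
#           f.write(chunk)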
| 35.104784
| 120
| 0.617416
|
eec77c7ecdb70944fb5989b063f2be4f39be43db
| 961
|
py
|
Python
|
app/sheet.py
|
vprnet/lyme-disease
|
a8d457d08c6ccd336d62d3ccb7063b875affc14c
|
[
"Apache-2.0"
] | 1
|
2015-05-26T03:35:28.000Z
|
2015-05-26T03:35:28.000Z
|
app/sheet.py
|
vprnet/lyme-disease
|
a8d457d08c6ccd336d62d3ccb7063b875affc14c
|
[
"Apache-2.0"
] | null | null | null |
app/sheet.py
|
vprnet/lyme-disease
|
a8d457d08c6ccd336d62d3ccb7063b875affc14c
|
[
"Apache-2.0"
] | null | null | null |
from google_spreadsheet.api import SpreadsheetAPI
from config import GOOGLE_SPREADSHEET
def list_sheets():
"""The API sheet_key is not the same as the key in the URL. This function
just prints out all sheet keys"""
api = SpreadsheetAPI(GOOGLE_SPREADSHEET['USER'],
GOOGLE_SPREADSHEET['PASSWORD'],
GOOGLE_SPREADSHEET['SOURCE'])
spreadsheets = api.list_spreadsheets()
for sheet in spreadsheets:
print sheet
def get_google_sheet(sheet_key='1gvZaPys4MZxUT1qwXQJkPqupnfdxODgLEA-f4MJzAM8', sheet_id='od7'):
"""Uses python_google_spreadsheet API to interact with sheet"""
api = SpreadsheetAPI(GOOGLE_SPREADSHEET['USER'],
GOOGLE_SPREADSHEET['PASSWORD'],
GOOGLE_SPREADSHEET['SOURCE'])
sheet = api.get_worksheet(sheet_key, sheet_id)
print sheet
sheet_object = sheet.get_rows()
return sheet_object
def sheet_to_json():
sheet = get_google_sheet()
print sheet
sheet_to_json()
| 30.03125
| 95
| 0.727367
|
27480df416e85ef2133108220137fc025a404743
| 796
|
py
|
Python
|
examples/AddressBook/server.py
|
novaweb-mobi/nova-api
|
2887118ff10d18f366ce661262bd25bb96648470
|
[
"MIT"
] | 3
|
2020-09-08T23:33:41.000Z
|
2021-12-24T20:50:13.000Z
|
examples/AddressBook/server.py
|
novaweb-mobi/nova-api
|
2887118ff10d18f366ce661262bd25bb96648470
|
[
"MIT"
] | 39
|
2020-07-29T12:34:14.000Z
|
2022-03-05T16:50:29.000Z
|
examples/AddressBook/server.py
|
novaweb-mobi/nova-api
|
2887118ff10d18f366ce661262bd25bb96648470
|
[
"MIT"
] | 1
|
2021-03-05T19:41:58.000Z
|
2021-03-05T19:41:58.000Z
|
import connexion
from flask_cors import CORS
import nova_api
nova_api.logger.info("Test")
debug = True
port = 8080
version = "1"
# Import entity and dao
mod = __import__("ContactDAO", fromlist=["ContactDAO"])
contact_dao = getattr(mod, "ContactDAO")
mod = __import__("PhoneDAO", fromlist=["PhoneDAO"])
phone_dao = getattr(mod, "PhoneDAO")
# Create the table in the database
dao = contact_dao()
dao.create_table_if_not_exists()
dao.close()
dao = phone_dao()
dao.create_table_if_not_exists()
dao.close()
# Create the application instance
app = connexion.App(__name__, specification_dir=".")
CORS(app.app)
# Add the api to the flask server
app.add_api("contact_api.yml")
print("Done adding api for {ent}".format(ent="Contact"))
if __name__ == '__main__':
app.run(debug=debug, port=port)
| 22.111111
| 56
| 0.741206
|
17ba068c034329824f6ff9658114f40c6d3a4cd4
| 2,739
|
py
|
Python
|
CIM16/IEC61970/Generation/Production/InflowForecast.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | null | null | null |
CIM16/IEC61970/Generation/Production/InflowForecast.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | null | null | null |
CIM16/IEC61970/Generation/Production/InflowForecast.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | 1
|
2021-04-02T18:04:49.000Z
|
2021-04-02T18:04:49.000Z
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM16.IEC61970.Core.RegularIntervalSchedule import RegularIntervalSchedule
class InflowForecast(RegularIntervalSchedule):
"""Natural water inflow to a reservoir, usually forecasted from predicted rain and snowmelt. Typically in one hour increments for up to 10 days. The forecast is given in average cubic meters per second over the time increment.Natural water inflow to a reservoir, usually forecasted from predicted rain and snowmelt. Typically in one hour increments for up to 10 days. The forecast is given in average cubic meters per second over the time increment.
"""
def __init__(self, Reservoir=None, *args, **kw_args):
"""Initialises a new 'InflowForecast' instance.
@param Reservoir: A reservoir may have a 'natural' inflow forecast.
"""
self._Reservoir = None
self.Reservoir = Reservoir
super(InflowForecast, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = ["Reservoir"]
_many_refs = []
def getReservoir(self):
"""A reservoir may have a 'natural' inflow forecast.
"""
return self._Reservoir
def setReservoir(self, value):
if self._Reservoir is not None:
filtered = [x for x in self.Reservoir.InflowForecasts if x != self]
self._Reservoir._InflowForecasts = filtered
self._Reservoir = value
if self._Reservoir is not None:
if self not in self._Reservoir._InflowForecasts:
self._Reservoir._InflowForecasts.append(self)
Reservoir = property(getReservoir, setReservoir)
| 44.901639
| 453
| 0.725447
|
7c09c1fea3f9a1b3fb9b255177fde3040dcfa7e7
| 47
|
py
|
Python
|
ssz/constants.py
|
Bhargavasomu/py-ssz
|
8ed62a3a22959f30acb4e20c58011389cd46638f
|
[
"MIT"
] | null | null | null |
ssz/constants.py
|
Bhargavasomu/py-ssz
|
8ed62a3a22959f30acb4e20c58011389cd46638f
|
[
"MIT"
] | null | null | null |
ssz/constants.py
|
Bhargavasomu/py-ssz
|
8ed62a3a22959f30acb4e20c58011389cd46638f
|
[
"MIT"
] | null | null | null |
BYTES_PREFIX_LENGTH = 4
LIST_PREFIX_LENGTH = 4
| 15.666667
| 23
| 0.829787
|
447256fe0c17e970fc602df30f4a072958edfd14
| 1,317
|
py
|
Python
|
doc/charts/doughnut.py
|
ankitJoshi03/openpyxlzip
|
f3b8aa2f80f9d8bc31ce5fcf05c822d88d2ff647
|
[
"MIT"
] | null | null | null |
doc/charts/doughnut.py
|
ankitJoshi03/openpyxlzip
|
f3b8aa2f80f9d8bc31ce5fcf05c822d88d2ff647
|
[
"MIT"
] | null | null | null |
doc/charts/doughnut.py
|
ankitJoshi03/openpyxlzip
|
f3b8aa2f80f9d8bc31ce5fcf05c822d88d2ff647
|
[
"MIT"
] | null | null | null |
from openpyxlzip import Workbook
from openpyxlzip.chart import (
DoughnutChart,
Reference,
Series,
)
from openpyxlzip.chart.series import DataPoint
data = [
['Pie', 2014, 2015],
['Plain', 40, 50],
['Jam', 2, 10],
['Lime', 20, 30],
['Chocolate', 30, 40],
]
wb = Workbook()
ws = wb.active
for row in data:
ws.append(row)
chart = DoughnutChart()
labels = Reference(ws, min_col=1, min_row=2, max_row=5)
data = Reference(ws, min_col=2, min_row=1, max_row=5)
chart.add_data(data, titles_from_data=True)
chart.set_categories(labels)
chart.title = "Doughnuts sold by category"
chart.style = 26
# Cut the first slice out of the doughnut
slices = [DataPoint(idx=i) for i in range(4)]
plain, jam, lime, chocolate = slices
chart.series[0].data_points = slices
plain.graphicalProperties.solidFill = "FAE1D0"
jam.graphicalProperties.solidFill = "BB2244"
lime.graphicalProperties.solidFill = "22DD22"
chocolate.graphicalProperties.solidFill = "61210B"
chocolate.explosion = 10
ws.add_chart(chart, "E1")
from copy import deepcopy
chart2 = deepcopy(chart)
chart2.title = None
data = Reference(ws, min_col=3, min_row=1, max_row=5)
series2 = Series(data, title_from_data=True)
series2.data_points = slices
chart2.series.append(series2)
ws.add_chart(chart2, "E17")
wb.save("doughnut.xlsx")
| 23.517857
| 55
| 0.725133
|
28379a75e888eab9e9a5afb9f14cd28535fca98d
| 493
|
py
|
Python
|
Class-2/Activities/03-Stu_KidInCandyStore-LoopsRecap/Solved/kid_in_candy_store.py
|
dillon-wright/Python-Week-3
|
97ef515862c2b07be5bc420f4574488eaf12de2b
|
[
"Unlicense"
] | null | null | null |
Class-2/Activities/03-Stu_KidInCandyStore-LoopsRecap/Solved/kid_in_candy_store.py
|
dillon-wright/Python-Week-3
|
97ef515862c2b07be5bc420f4574488eaf12de2b
|
[
"Unlicense"
] | null | null | null |
Class-2/Activities/03-Stu_KidInCandyStore-LoopsRecap/Solved/kid_in_candy_store.py
|
dillon-wright/Python-Week-3
|
97ef515862c2b07be5bc420f4574488eaf12de2b
|
[
"Unlicense"
] | null | null | null |
# The list of candies to print to the screen
candy_list = ["Snickers", "Kit Kat", "Sour Patch Kids", "Juicy Fruit", "Swedish Fish", "Skittles", "Hershey Bar", "Starbursts", "M&Ms"]
# The amount of candy the user will be allowed to choose
allowance = 5
# The list used to store all of the candies selected inside of
candy_cart = []
# Print out options
X = int(input("How many Candys you want bro?"))
A = 0
while A < X:
if A > allowance:
break
print(candy_list[A])
A += 1
| 27.388889
| 135
| 0.663286
|
c76441fa48a275ba6092917c7e13ca741846aafc
| 996
|
py
|
Python
|
app/main/model/user_team.py
|
t2venture/room40-underground
|
5fb15ef046412961c8a0407a9b93b2684c819dc9
|
[
"MIT"
] | null | null | null |
app/main/model/user_team.py
|
t2venture/room40-underground
|
5fb15ef046412961c8a0407a9b93b2684c819dc9
|
[
"MIT"
] | null | null | null |
app/main/model/user_team.py
|
t2venture/room40-underground
|
5fb15ef046412961c8a0407a9b93b2684c819dc9
|
[
"MIT"
] | null | null | null |
from .. import db, flask_bcrypt
import datetime
import jwt
from app.main.model.blacklist import BlacklistToken
from ..config import key
class UserTeam(db.Model):
"""UserTeam Model is used to link users within a team"""
__tablename__ = "user_team"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
team_id = db.Column(db.Integer, db.ForeignKey('team.id'))
role=db.Column(db.String(50), unique=False, nullable=False, default='Viewer')
created_by=db.Column(db.Integer, db.ForeignKey('user.id'))
modified_by=db.Column(db.Integer, db.ForeignKey('user.id'))
created_time=db.Column(db.DateTime, unique=False, nullable=False)
modified_time=db.Column(db.DateTime, unique=False, nullable=False)
is_deleted=db.Column(db.Boolean, unique=False, nullable=False, default=False)
is_active=db.Column(db.Boolean, unique=False, nullable=False, default=True)
#ROLE IS OWNER, EDITOR, VIEWER
| 47.428571
| 81
| 0.737952
|
f9be7e3662bdecca7cc62870887ebf9b5481cc12
| 6,128
|
py
|
Python
|
test/lint/check-rpc-mappings.py
|
tngc-one/tngcoin
|
1382521c4f897cf798e840fee2ce9abd70bbb99b
|
[
"MIT"
] | null | null | null |
test/lint/check-rpc-mappings.py
|
tngc-one/tngcoin
|
1382521c4f897cf798e840fee2ce9abd70bbb99b
|
[
"MIT"
] | null | null | null |
test/lint/check-rpc-mappings.py
|
tngc-one/tngcoin
|
1382521c4f897cf798e840fee2ce9abd70bbb99b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The TNGC Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Check RPC argument consistency."""
from collections import defaultdict
import os
import re
import sys
# Source files (relative to root) to scan for dispatch tables
SOURCES = [
"src/rpc/server.cpp",
"src/rpc/blockchain.cpp",
"src/rpc/mining.cpp",
"src/rpc/misc.cpp",
"src/rpc/net.cpp",
"src/rpc/rawtransaction.cpp",
"src/wallet/rpcwallet.cpp",
]
# Source file (relative to root) containing conversion mapping
SOURCE_CLIENT = 'src/rpc/client.cpp'
# Argument names that should be ignored in consistency checks
IGNORE_DUMMY_ARGS = {'dummy', 'arg0', 'arg1', 'arg2', 'arg3', 'arg4', 'arg5', 'arg6', 'arg7', 'arg8', 'arg9'}
class RPCCommand:
def __init__(self, name, args):
self.name = name
self.args = args
class RPCArgument:
def __init__(self, names, idx):
self.names = names
self.idx = idx
self.convert = False
def parse_string(s):
assert s[0] == '"'
assert s[-1] == '"'
return s[1:-1]
def process_commands(fname):
"""Find and parse dispatch table in implementation file `fname`."""
cmds = []
in_rpcs = False
with open(fname, "r", encoding="utf8") as f:
for line in f:
line = line.rstrip()
if not in_rpcs:
if re.match(r"static const CRPCCommand .*\[\] =", line):
in_rpcs = True
else:
if line.startswith('};'):
in_rpcs = False
elif '{' in line and '"' in line:
m = re.search(r'{ *("[^"]*"), *("[^"]*"), *&([^,]*), *{([^}]*)} *},', line)
assert m, 'No match to table expression: %s' % line
name = parse_string(m.group(2))
args_str = m.group(4).strip()
if args_str:
args = [RPCArgument(parse_string(x.strip()).split('|'), idx) for idx, x in enumerate(args_str.split(','))]
else:
args = []
cmds.append(RPCCommand(name, args))
assert not in_rpcs and cmds, "Something went wrong with parsing the C++ file: update the regexps"
return cmds
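# For reference, the dispatch-table rows the regex above is written against
# look roughly like the following (illustrative examples, not taken verbatim
# from the sources):
#
#   { "blockchain", "getblockcount", &getblockcount, {} },
#   { "wallet", "sendtoaddress", &sendtoaddress, {"address","amount"} },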
def process_mapping(fname):
"""Find and parse conversion table in implementation file `fname`."""
cmds = []
in_rpcs = False
with open(fname, "r", encoding="utf8") as f:
for line in f:
line = line.rstrip()
if not in_rpcs:
if line == 'static const CRPCConvertParam vRPCConvertParams[] =':
in_rpcs = True
else:
if line.startswith('};'):
in_rpcs = False
elif '{' in line and '"' in line:
m = re.search(r'{ *("[^"]*"), *([0-9]+) *, *("[^"]*") *},', line)
assert m, 'No match to table expression: %s' % line
name = parse_string(m.group(1))
idx = int(m.group(2))
argname = parse_string(m.group(3))
cmds.append((name, idx, argname))
assert not in_rpcs and cmds
return cmds
def main():
if len(sys.argv) != 2:
print('Usage: {} ROOT-DIR'.format(sys.argv[0]), file=sys.stderr)
sys.exit(1)
root = sys.argv[1]
# Get all commands from dispatch tables
cmds = []
for fname in SOURCES:
cmds += process_commands(os.path.join(root, fname))
cmds_by_name = {}
for cmd in cmds:
cmds_by_name[cmd.name] = cmd
# Get current convert mapping for client
client = SOURCE_CLIENT
mapping = set(process_mapping(os.path.join(root, client)))
print('* Checking consistency between dispatch tables and vRPCConvertParams')
# Check mapping consistency
errors = 0
for (cmdname, argidx, argname) in mapping:
try:
rargnames = cmds_by_name[cmdname].args[argidx].names
except IndexError:
print('ERROR: %s argument %i (named %s in vRPCConvertParams) is not defined in dispatch table' % (cmdname, argidx, argname))
errors += 1
continue
if argname not in rargnames:
print('ERROR: %s argument %i is named %s in vRPCConvertParams but %s in dispatch table' % (cmdname, argidx, argname, rargnames), file=sys.stderr)
errors += 1
# Check for conflicts in vRPCConvertParams conversion
# All aliases for an argument must either be present in the
# conversion table, or not. Anything in between means an oversight
# and some aliases won't work.
for cmd in cmds:
for arg in cmd.args:
convert = [((cmd.name, arg.idx, argname) in mapping) for argname in arg.names]
if any(convert) != all(convert):
print('ERROR: %s argument %s has conflicts in vRPCConvertParams conversion specifier %s' % (cmd.name, arg.names, convert))
errors += 1
arg.convert = all(convert)
# Check for conversion difference by argument name.
# It is preferable for API consistency that arguments with the same name
# have the same conversion, so bin by argument name.
all_methods_by_argname = defaultdict(list)
converts_by_argname = defaultdict(list)
for cmd in cmds:
for arg in cmd.args:
for argname in arg.names:
all_methods_by_argname[argname].append(cmd.name)
converts_by_argname[argname].append(arg.convert)
for argname, convert in converts_by_argname.items():
if all(convert) != any(convert):
if argname in IGNORE_DUMMY_ARGS:
# these are testing or dummy, don't warn for them
continue
print('WARNING: conversion mismatch for argument named %s (%s)' %
(argname, list(zip(all_methods_by_argname[argname], converts_by_argname[argname]))))
sys.exit(errors > 0)
if __name__ == '__main__':
main()
| 37.595092
| 157
| 0.581593
|
31a869df251b6d341e3235f73158ce4609ad2b0d
| 2,945
|
py
|
Python
|
S4/S4 Library/simulation/gsi_handlers/sim_timeline_handlers.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | 1
|
2021-05-20T19:33:37.000Z
|
2021-05-20T19:33:37.000Z
|
S4/S4 Library/simulation/gsi_handlers/sim_timeline_handlers.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | null | null | null |
S4/S4 Library/simulation/gsi_handlers/sim_timeline_handlers.py
|
NeonOcean/Environment
|
ca658cf66e8fd6866c22a4a0136d415705b36d26
|
[
"CC-BY-4.0"
] | null | null | null |
import contextlib
from gsi_handlers.gameplay_archiver import GameplayArchiver
from sims4.gsi.schema import GsiGridSchema, GsiFieldVisualizers
import services
sim_timeline_archive_schema = GsiGridSchema(label='Sim Time Line', sim_specific=True)
sim_timeline_archive_schema.add_field('game_time', label='GameTime', width=40)
sim_timeline_archive_schema.add_field('module', label='Module', width=35)
sim_timeline_archive_schema.add_field('status', label='Status', width=40)
sim_timeline_archive_schema.add_field('message', label='Message', width=35)
sim_timeline_archive_schema.add_field('interaction_id', label='Interaction ID', hidden=True, type=GsiFieldVisualizers.INT)
sim_timeline_archive_schema.add_field('interaction', label='Interaction', width=40)
sim_timeline_archive_schema.add_field('target', label='Target', width=40)
sim_timeline_archive_schema.add_field('initiator', label='Initiator', width=30)
sim_timeline_archive_schema.add_field('duration', label='Duration(Sim Game Time Minutes)', width=50)
archiver = GameplayArchiver('sim_time_line_archive', sim_timeline_archive_schema)
@contextlib.contextmanager
def archive_sim_timeline_context_manager(sim, module, log_message, interaction=None):
if not archiver.enabled:
yield None
else:
services_time_service = services.time_service()
if services_time_service is not None and services_time_service.sim_timeline is not None:
start_time = services_time_service.sim_timeline.now
else:
start_time = None
try:
archive_sim_timeline(sim, module, 'Start', log_message, interaction=interaction)
yield None
finally:
duration = None
if start_time is not None:
services_time_service = services.time_service()
if services_time_service is not None:
if services_time_service.sim_timeline is not None:
duration = services_time_service.sim_timeline.now - start_time
archive_sim_timeline(sim, module, 'Completed', log_message, interaction=interaction, duration=duration)
def archive_sim_timeline(sim, module, status, message_data, interaction=None, duration=None):
services_time_service = services.time_service()
if services_time_service is not None and services_time_service.sim_timeline is not None:
now = services_time_service.sim_timeline.now
else:
now = None
archive_data = {'game_time': str(now), 'module': module, 'message': message_data, 'duration': '{} min ({})'.format(str(duration.in_minutes()), str(duration)) if duration is not None else 'None', 'status': status}
if interaction is not None:
archive_data.update({'interaction_id': interaction.id, 'interaction': str(interaction.affordance.__name__), 'target': str(interaction.target), 'initiator': str(interaction.sim)})
archiver.archive(data=archive_data, object_id=sim.id)
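# Illustrative usage sketch (the sim object and the work being timed are
# placeholders supplied by the calling gameplay code; archiving only happens
# when the archiver is enabled in GSI):
#
#   with archive_sim_timeline_context_manager(sim, 'autonomy', 'score interactions'):
#       run_autonomy_pass(sim)   # hypothetical work being timed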
| 60.102041
| 216
| 0.747708
|
0e0cd521f28daadf211d86aed469a20001b61797
| 12,731
|
py
|
Python
|
example/image-classification/common/fit.py
|
UNIXCODERS/incubator-mxnet
|
5c5a904209900e21b20ca206b043ea7a8252ebfc
|
[
"Apache-2.0"
] | 1
|
2018-02-26T02:46:07.000Z
|
2018-02-26T02:46:07.000Z
|
example/image-classification/common/fit.py
|
UNIXCODERS/incubator-mxnet
|
5c5a904209900e21b20ca206b043ea7a8252ebfc
|
[
"Apache-2.0"
] | null | null | null |
example/image-classification/common/fit.py
|
UNIXCODERS/incubator-mxnet
|
5c5a904209900e21b20ca206b043ea7a8252ebfc
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" example train fit utility """
import logging
import os
import time
import re
import math
import mxnet as mx
def _get_lr_scheduler(args, kv):
if 'lr_factor' not in args or args.lr_factor >= 1:
return (args.lr, None)
epoch_size = args.num_examples / args.batch_size
if 'dist' in args.kv_store:
epoch_size /= kv.num_workers
begin_epoch = args.load_epoch if args.load_epoch else 0
if 'pow' in args.lr_step_epochs:
lr = args.lr
max_up = args.num_epochs * epoch_size
pwr = float(re.sub('pow[- ]*', '', args.lr_step_epochs))
poly_sched = mx.lr_scheduler.PolyScheduler(max_up, lr, pwr)
return (lr, poly_sched)
step_epochs = [int(l) for l in args.lr_step_epochs.split(',')]
lr = args.lr
for s in step_epochs:
if begin_epoch >= s:
lr *= args.lr_factor
if lr != args.lr:
logging.info('Adjust learning rate to %e for epoch %d',
lr, begin_epoch)
steps = [epoch_size * (x - begin_epoch)
for x in step_epochs if x - begin_epoch > 0]
return (lr, mx.lr_scheduler.MultiFactorScheduler(step=steps, factor=args.lr_factor))
def _load_model(args, rank=0):
if 'load_epoch' not in args or args.load_epoch is None:
return (None, None, None)
assert args.model_prefix is not None
model_prefix = args.model_prefix
if rank > 0 and os.path.exists("%s-%d-symbol.json" % (model_prefix, rank)):
model_prefix += "-%d" % (rank)
sym, arg_params, aux_params = mx.model.load_checkpoint(
model_prefix, args.load_epoch)
logging.info('Loaded model %s_%04d.params', model_prefix, args.load_epoch)
return (sym, arg_params, aux_params)
def _save_model(args, rank=0):
if args.model_prefix is None:
return None
dst_dir = os.path.dirname(args.model_prefix)
if not os.path.isdir(dst_dir):
os.mkdir(dst_dir)
return mx.callback.do_checkpoint(args.model_prefix if rank == 0 else "%s-%d" % (
args.model_prefix, rank))
def add_fit_args(parser):
"""
parser : argparse.ArgumentParser
return a parser added with args required by fit
"""
train = parser.add_argument_group('Training', 'model training')
train.add_argument('--network', type=str,
help='the neural network to use')
train.add_argument('--num-layers', type=int,
help='number of layers in the neural network, \
required by some networks such as resnet')
train.add_argument('--gpus', type=str,
help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu')
train.add_argument('--kv-store', type=str, default='device',
help='key-value store type')
train.add_argument('--num-epochs', type=int, default=100,
help='max num of epochs')
train.add_argument('--lr', type=float, default=0.1,
help='initial learning rate')
train.add_argument('--lr-factor', type=float, default=0.1,
help='the ratio to reduce lr on each step')
train.add_argument('--lr-step-epochs', type=str,
help='the epochs to reduce the lr, e.g. 30,60')
train.add_argument('--initializer', type=str, default='default',
help='the initializer type')
train.add_argument('--optimizer', type=str, default='sgd',
help='the optimizer type')
train.add_argument('--mom', type=float, default=0.9,
help='momentum for sgd')
train.add_argument('--wd', type=float, default=0.0001,
help='weight decay for sgd')
train.add_argument('--batch-size', type=int, default=128,
help='the batch size')
train.add_argument('--disp-batches', type=int, default=20,
help='show progress for every n batches')
train.add_argument('--model-prefix', type=str,
help='model prefix')
parser.add_argument('--monitor', dest='monitor', type=int, default=0,
help='log network parameters every N iters if larger than 0')
train.add_argument('--load-epoch', type=int,
help='load the model on an epoch using the model-load-prefix')
train.add_argument('--top-k', type=int, default=0,
help='report the top-k accuracy. 0 means no report.')
train.add_argument('--loss', type=str, default='',
                       help='show the cross-entropy or nll loss. ce stands for cross-entropy, nll-loss stands for negative log-likelihood loss')
train.add_argument('--test-io', type=int, default=0,
help='1 means test reading speed without training')
train.add_argument('--dtype', type=str, default='float32',
help='precision: float32 or float16')
train.add_argument('--gc-type', type=str, default='none',
help='type of gradient compression to use, \
takes `2bit` or `none` for now')
train.add_argument('--gc-threshold', type=float, default=0.5,
help='threshold for 2bit gradient compression')
# additional parameters for large batch sgd
train.add_argument('--macrobatch-size', type=int, default=0,
help='distributed effective batch size')
train.add_argument('--warmup-epochs', type=int, default=5,
help='the epochs to ramp-up lr to scaled large-batch value')
train.add_argument('--warmup-strategy', type=str, default='linear',
help='the ramping-up strategy for large batch sgd')
return train
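# Illustrative wiring sketch (the network symbol and the data-iterator factory
# are assumptions here; in this example directory they normally come from the
# companion symbol and data modules):
#
#   import argparse
#   parser = argparse.ArgumentParser(description="train an image classifier")
#   add_fit_args(parser)
#   args = parser.parse_args()
#   fit(args, network_symbol, get_iterators)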
def fit(args, network, data_loader, **kwargs):
"""
train a model
args : argparse returns
    network : the symbol definition of the neural network
data_loader : function that returns the train and val data iterators
"""
# kvstore
kv = mx.kvstore.create(args.kv_store)
if args.gc_type != 'none':
kv.set_gradient_compression({'type': args.gc_type,
'threshold': args.gc_threshold})
# logging
head = '%(asctime)-15s Node[' + str(kv.rank) + '] %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
logging.info('start with arguments %s', args)
# data iterators
(train, val) = data_loader(args, kv)
if args.test_io:
tic = time.time()
for i, batch in enumerate(train):
for j in batch.data:
j.wait_to_read()
if (i + 1) % args.disp_batches == 0:
logging.info('Batch [%d]\tSpeed: %.2f samples/sec', i,
args.disp_batches * args.batch_size / (time.time() - tic))
tic = time.time()
return
# load model
if 'arg_params' in kwargs and 'aux_params' in kwargs:
arg_params = kwargs['arg_params']
aux_params = kwargs['aux_params']
else:
sym, arg_params, aux_params = _load_model(args, kv.rank)
if sym is not None:
assert sym.tojson() == network.tojson()
# save model
checkpoint = _save_model(args, kv.rank)
# devices for training
devs = mx.cpu() if args.gpus is None or args.gpus == "" else [
mx.gpu(int(i)) for i in args.gpus.split(',')]
# learning rate
lr, lr_scheduler = _get_lr_scheduler(args, kv)
# create model
model = mx.mod.Module(
context=devs,
symbol=network
)
optimizer_params = {
'learning_rate': lr,
'wd': args.wd,
'lr_scheduler': lr_scheduler,
'multi_precision': True}
# Only a limited number of optimizers have 'momentum' property
has_momentum = {'sgd', 'dcasgd', 'nag'}
if args.optimizer in has_momentum:
optimizer_params['momentum'] = args.mom
monitor = mx.mon.Monitor(
args.monitor, pattern=".*") if args.monitor > 0 else None
# A limited number of optimizers have a warmup period
has_warmup = {'lbsgd', 'lbnag'}
if args.optimizer in has_warmup:
if 'dist' in args.kv_store:
nworkers = kv.num_workers
else:
nworkers = 1
epoch_size = args.num_examples / args.batch_size / nworkers
if epoch_size < 1:
epoch_size = 1
macrobatch_size = args.macrobatch_size
if macrobatch_size < args.batch_size * nworkers:
macrobatch_size = args.batch_size * nworkers
#batch_scale = round(float(macrobatch_size) / args.batch_size / nworkers +0.4999)
batch_scale = math.ceil(
float(macrobatch_size) / args.batch_size / nworkers)
optimizer_params['updates_per_epoch'] = epoch_size
optimizer_params['begin_epoch'] = args.load_epoch if args.load_epoch else 0
optimizer_params['batch_scale'] = batch_scale
optimizer_params['warmup_strategy'] = args.warmup_strategy
optimizer_params['warmup_epochs'] = args.warmup_epochs
optimizer_params['num_epochs'] = args.num_epochs
if args.initializer == 'default':
if args.network == 'alexnet':
# AlexNet will not converge using Xavier
initializer = mx.init.Normal()
else:
initializer = mx.init.Xavier(
rnd_type='gaussian', factor_type="in", magnitude=2)
# initializer = mx.init.Xavier(factor_type="in", magnitude=2.34),
elif args.initializer == 'xavier':
initializer = mx.init.Xavier()
elif args.initializer == 'msra':
initializer = mx.init.MSRAPrelu()
elif args.initializer == 'orthogonal':
initializer = mx.init.Orthogonal()
elif args.initializer == 'normal':
initializer = mx.init.Normal()
elif args.initializer == 'uniform':
initializer = mx.init.Uniform()
elif args.initializer == 'one':
initializer = mx.init.One()
elif args.initializer == 'zero':
initializer = mx.init.Zero()
# evaluation metrices
eval_metrics = ['accuracy']
if args.top_k > 0:
eval_metrics.append(mx.metric.create(
'top_k_accuracy', top_k=args.top_k))
supported_loss = ['ce', 'nll_loss']
if len(args.loss) > 0:
# ce or nll loss is only applicable to softmax output
loss_type_list = args.loss.split(',')
if 'softmax_output' in network.list_outputs():
for loss_type in loss_type_list:
loss_type = loss_type.strip()
if loss_type == 'nll':
loss_type = 'nll_loss'
if loss_type not in supported_loss:
                    logging.warning(loss_type + ' is not a valid loss type, only cross-entropy or ' \
'negative likelihood loss is supported!')
else:
eval_metrics.append(mx.metric.create(loss_type))
else:
logging.warning("The output is not softmax_output, loss argument will be skipped!")
# callbacks that run after each batch
batch_end_callbacks = [mx.callback.Speedometer(
args.batch_size, args.disp_batches)]
if 'batch_end_callback' in kwargs:
cbs = kwargs['batch_end_callback']
batch_end_callbacks += cbs if isinstance(cbs, list) else [cbs]
# run
model.fit(train,
begin_epoch=args.load_epoch if args.load_epoch else 0,
num_epoch=args.num_epochs,
eval_data=val,
eval_metric=eval_metrics,
kvstore=kv,
optimizer=args.optimizer,
optimizer_params=optimizer_params,
initializer=initializer,
arg_params=arg_params,
aux_params=aux_params,
batch_end_callback=batch_end_callbacks,
epoch_end_callback=checkpoint,
allow_missing=True,
monitor=monitor)
| 41.740984
| 132
| 0.615191
|
32ccae4a984901a46262a2da6ed606594f66dc5d
| 3,181
|
py
|
Python
|
services/discovery/jobs/periodic/uptime.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 84
|
2017-10-22T11:01:39.000Z
|
2022-02-27T03:43:48.000Z
|
services/discovery/jobs/periodic/uptime.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 22
|
2017-12-11T07:21:56.000Z
|
2021-09-23T02:53:50.000Z
|
services/discovery/jobs/periodic/uptime.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 23
|
2017-12-06T06:59:52.000Z
|
2022-02-24T00:02:25.000Z
|
# ---------------------------------------------------------------------
# Uptime check
# ---------------------------------------------------------------------
# Copyright (C) 2007-2015 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import datetime
# NOC modules
from noc.services.discovery.jobs.base import DiscoveryCheck
from noc.fm.models.uptime import Uptime
from noc.core.mx import send_message, MX_PROFILE_ID
from noc.config import config
from noc.core.hash import hash_int
from noc.core.comp import smart_bytes
class UptimeCheck(DiscoveryCheck):
"""
Uptime discovery
"""
name = "uptime"
required_script = "get_uptime"
def handler(self):
self.logger.debug("Checking uptime")
uptime = self.object.scripts.get_uptime()
self.logger.debug("Received uptime: %s", uptime)
if not uptime:
return
reboot_ts = Uptime.register(self.object, uptime)
if not reboot_ts:
return
self.set_artefact("reboot", True)
if config.message.enable_reboot:
self.logger.info("Sending reboot message to mx")
self.send_reboot_message(reboot_ts)
def send_reboot_message(self, ts: datetime.datetime) -> None:
mo = self.object
data = {
"ts": ts.isoformat(),
"managed_object": {
"id": str(mo.id),
"name": mo.name,
"bi_id": str(mo.bi_id),
"address": mo.address,
"pool": mo.pool.name,
"profile": mo.profile.name,
"object_profile": {
"id": str(mo.object_profile.id),
"name": mo.object_profile.name,
},
"administrative_domain": {
"id": str(mo.administrative_domain.id),
"name": str(mo.administrative_domain.name),
},
"segment": {
"id": str(mo.segment.id),
"name": str(mo.segment.name),
},
"x": mo.x,
"y": mo.y,
},
}
if mo.vendor:
data["managed_object"]["vendor"] = mo.vendor.name
if mo.platform:
data["managed_object"]["platform"] = mo.platform.name
if mo.version:
data["managed_object"]["version"] = mo.version.version
if mo.container:
data["managed_object"]["container"] = {
"id": str(mo.container.id),
"name": mo.container.name,
}
if mo.remote_system and mo.remote_id:
data["managed_object"]["remote_system"] = {
"id": str(mo.remote_system.id),
"name": mo.remote_system.name,
}
data["managed_object"]["remote_id"] = str(mo.remote_id)
send_message(
data,
message_type="reboot",
headers={
MX_PROFILE_ID: smart_bytes(mo.object_profile.id),
},
sharding_key=hash_int(mo.id) & 0xFFFFFFFF,
)
| 33.840426
| 71
| 0.493555
|
487c8708edf435a4433939623877464249f87114
| 8,208
|
py
|
Python
|
lotka-volterra/VI_for_SDEs.py
|
kw-lee/VIforSDEs
|
dcba3832aaad0aebc921a3b0628c43046d651629
|
[
"MIT"
] | 43
|
2018-03-07T16:46:08.000Z
|
2022-03-12T11:16:39.000Z
|
lotka-volterra/VI_for_SDEs.py
|
SourangshuGhosh/VIforSDEs
|
fd432ffeb9290ef2dc1e8a38638c55556738dc3d
|
[
"MIT"
] | 1
|
2018-03-30T18:06:11.000Z
|
2018-03-30T18:06:39.000Z
|
lotka-volterra/VI_for_SDEs.py
|
SourangshuGhosh/VIforSDEs
|
fd432ffeb9290ef2dc1e8a38638c55556738dc3d
|
[
"MIT"
] | 8
|
2018-09-12T10:36:00.000Z
|
2021-11-17T05:03:25.000Z
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
# python data types
import numpy as np
import scipy.stats as stats
from datetime import datetime
# model-specific imports
from lotka_volterra_data_augmentation import *
from lotka_volterra_loss import ELBO
from network_utils import Weight, Bias
tfd = tf.contrib.distributions
tfb = tfd.bijectors
DTYPE = tf.float32
NP_DTYPE = np.float32
class Model():
def __init__(self, network_params, p, dt, obs, params, priors, features):
weights = {}
for i in range(1, network_params['num_hidden_layers'] + 1):
with tf.variable_scope('hidden_layer_%d' % i):
if i == 1:
weights['w%i' % i] = Weight(
[1, no_input, network_params['hidden_layer_width']], DTYPE).tile(p)
else:
weights['w%i' % i] = Weight(
[1, network_params['hidden_layer_width'], network_params['hidden_layer_width']], DTYPE).tile(p)
weights['b%i' % i] = Bias(
[1, 1, network_params['hidden_layer_width']], DTYPE).tile(p)
with tf.variable_scope('output_layer'):
weights['w0'] = Weight(
[1, network_params['hidden_layer_width'], 5], DTYPE).tile(p)
weights['b0'] = Bias([1, 1, 5], DTYPE).tile(p)
self.weights = weights
self.network_params = network_params
self.obs = obs
self.params = params
self.priors = priors
self.features = features
self.p = p
self.dt = dt
# building computational graph
self._build()
def _build(self):
'''
        building model graph
'''
print("Building graph...")
# launching functions to create forward sims and calc the loss
with tf.name_scope('diffusion_bridge'):
paths, variational_mu, variational_sigma = self._diff_bridge()
with tf.name_scope('ELBO'):
mean_loss = ELBO(self.obs, paths, variational_mu,
variational_sigma, self.params, self.priors, self.p, self.dt)
tf.summary.scalar('mean_loss', mean_loss)
# specifying optimizer and gradient clipping for backprop
with tf.name_scope('optimize'):
opt = tf.train.AdamOptimizer(1e-3)
gradients, variables = zip(
*opt.compute_gradients(mean_loss))
global_norm = tf.global_norm(gradients)
gradients, _ = tf.clip_by_global_norm(gradients, 4e3)
self.train_step = opt.apply_gradients(
zip(gradients, variables))
tf.summary.scalar('global_grad_norm', global_norm)
# mean-field approx params to tensorboard
with tf.name_scope('variables'):
with tf.name_scope('theta1'):
tf.summary.scalar('theta1_mean', c1_mean)
tf.summary.scalar('theta1_std', c1_std)
with tf.name_scope('theta2'):
tf.summary.scalar('theta2_mean', c2_mean)
tf.summary.scalar('theta2_std', c2_std)
with tf.name_scope('theta3'):
tf.summary.scalar('theta3_mean', c3_mean)
tf.summary.scalar('theta3_std', c3_std)
self.merged = tf.summary.merge_all()
def _diff_bridge(self):
'''
rolls out rnn cell across the time series
'''
inp = tf.concat(
[self.obs['obs_init'], self.features['feature_init']], 2)
pred_mu, pred_sigma = self._rnn_cell(inp)
mu_store = tf.squeeze(pred_mu)
sigma_store = tf.reshape(pred_sigma, [-1, 4])
output = self._path_sampler(inp[:, 0, 0:2], pred_mu, pred_sigma)
path_store = tf.concat(
[tf.reshape(inp[:, :, 0:2], [-1, 2, 1]), tf.reshape(output, [-1, 2, 1])], 2)
for i in range(int(self.obs['T'] / self.dt) - 1):
x1_next_vec = tf.fill([self.p, 1, 1], self.features['x1_store'][i])
x2_next_vec = tf.fill([self.p, 1, 1], self.features['x2_store'][i])
inp = tf.concat([output, tf.tile([[[inp[0, 0, 2] + self.dt, self.features['tn_store'][i], self.features['x1_store'][i], self.features['x2_store'][i]]]], [
self.p, 1, 1]), tf.concat([x1_next_vec, x2_next_vec], 2) - output], 2)
pred_mu, pred_sigma = self._rnn_cell(inp)
mu_store = tf.concat([mu_store, tf.squeeze(pred_mu)], 1)
sigma_store = tf.concat(
[sigma_store, tf.reshape(pred_sigma, [-1, 4])], 1)
output = self._path_sampler(inp[:, 0, 0:2], pred_mu, pred_sigma)
path_store = tf.concat(
[path_store, tf.reshape(output, [-1, 2, 1])], 2)
sigma_store = tf.reshape(sigma_store, [-1, 2, 2])
mu_store = tf.reshape(mu_store, [-1, 2])
return path_store, mu_store, sigma_store
# the rnn cell called by diff_bridge
def _rnn_cell(self, inp, eps_identity=1e-3):
'''
rnn cell for supplying Gaussian state transitions
:param eps_identity: eps_identity * identity added to the diffusion matrix for numerical stability
'''
hidden_layer = tf.nn.relu(
tf.add(tf.matmul(inp, self.weights['w1']), self.weights['b1']))
for i in range(2, self.network_params['num_hidden_layers'] + 1):
hidden_layer = tf.nn.relu(
tf.add(tf.matmul(hidden_layer, self.weights['w%i' % i]), self.weights['b%i' % i]))
output = tf.add(
tf.matmul(hidden_layer, self.weights['w0']), self.weights['b0'])
mu, sigma_11, sigma_21, sigma_22 = tf.split(output, [2, 1, 1, 1], 2)
# reshaping sigma matrix to lower-triangular cholesky factor
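# softplus keeps the diagonal entries strictly positive, so sigma_chol is a valid
# lower-triangular factor; sigma_chol @ sigma_chol^T is then symmetric positive
# semi-definite, and adding eps_identity * I keeps the tf.cholesky call below
# numerically stable.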
zeros = tf.zeros(tf.shape(sigma_11))
sigma_11 = tf.nn.softplus(sigma_11)
sigma_22 = tf.nn.softplus(sigma_22)
sigma_chol = tf.concat([tf.concat([sigma_11, zeros], 2),
tf.concat([sigma_21, sigma_22], 2)], 1)
sigma = tf.cholesky(tf.matmul(sigma_chol, tf.transpose(sigma_chol, perm=[0, 2, 1])) + eps_identity * tf.tile(
tf.expand_dims(np.identity(2, dtype=np.float32), 0), [self.p, 1, 1]))
return mu, sigma
# function to return p simulations of a diffusion bridge
def _path_sampler(self, inp, mu_nn, sigma_nn):
'''
sample new state using learned Gaussian state transitions
:param inp: current state of system
:param mu_nn: drift vector from RNN
:param sigma_nn: diffusion matrix from RNN as cholesky factor
'''
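# Euler-Maruyama-style Gaussian transition: mean inp + dt * mu_nn and Cholesky
# scale sqrt(dt) * sigma_nn, pushed through a Softplus bijector so the sampled
# state stays positive.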
out_dist = tfd.TransformedDistribution(distribution=tfd.MultivariateNormalTriL(
loc=inp + self.dt * tf.squeeze(mu_nn), scale_tril=tf.sqrt(self.dt) * sigma_nn), bijector=tfb.Softplus(event_ndims=1))
out = tf.expand_dims(out_dist.sample(), 1)
return out
# train the model
def train(self, niter, path):
'''
trains model
:param niter: number of iterations
:param path: path to tensorboard output
'''
print("Training model...")
writer = tf.summary.FileWriter(
'%s/%s' % (path, datetime.now().strftime("%d:%m:%y-%H:%M:%S")), sess.graph)
for i in range(niter):
self.train_step.run()
if i % 10 == 0:
summary = sess.run(self.merged)
writer.add_summary(summary, i)
def save(self, path):
'''
save model
'''
saver = tf.train.Saver()
saver.save(sess, path)
print("Model Saved")
def load(self, path):
'''
load model
'''
saver = tf.train.Saver()
saver.restore(sess, path)
print("Model Restored")
if __name__ == "__main__":
with tf.Session() as sess:
lotka_volterra = Model(network_params=NETWORK_PARAMS, p=P,
dt=DT, obs=obs, params=params, priors=PRIORS, features=features)
sess.run(tf.global_variables_initializer())
# desired number of iterations. currently no implementation of a
# convergence criteria.
lotka_volterra.train(25000, PATH_TO_TENSORBOARD_OUTPUT)
| 39.84466
| 166
| 0.585648
|
db309fceda54328839be585127ce4cdd9a19a565
| 5,137
|
py
|
Python
|
YieldCurveSurface/yieldCurveSurface.py
|
letsgoexploring/miscellaneous-python-code
|
20e0b4f87ec7cdbcd7a7b44f236ce9b71f7cf0ee
|
[
"MIT"
] | null | null | null |
YieldCurveSurface/yieldCurveSurface.py
|
letsgoexploring/miscellaneous-python-code
|
20e0b4f87ec7cdbcd7a7b44f236ce9b71f7cf0ee
|
[
"MIT"
] | null | null | null |
YieldCurveSurface/yieldCurveSurface.py
|
letsgoexploring/miscellaneous-python-code
|
20e0b4f87ec7cdbcd7a7b44f236ce9b71f7cf0ee
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# In[1]:
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm,rc
import matplotlib.colors as colors
import matplotlib.dates as mdates
import numpy as np
import pandas as pd
from fredpy import series, window_equalize
import collections
# get_ipython().magic('matplotlib inline')
# In[2]:
# Import daily interest rate data
y1m= series('DTB4WK')
y3m= series('DTB3')
y6m= series('DTB6')
y1 = series('DGS1')
y5 = series('DGS5')
y10= series('DGS10')
y20= series('DGS20')
y30= series('DGS30')
# Set the date ranges for the data
for x in [y1m,y3m,y6m,y1,y5,y10,y20,y30]:
x.window(['2003-01-01','2016-03-31'])
# In[3]:
# Form individual data frames and the full data frame
y1mFrame= pd.DataFrame({'y1m':y1m.data},index=y1m.datenumbers)
yFrame= pd.DataFrame({'temp':np.nan+y3m.data},index=y3m.datenumbers)
y3mFrame= pd.DataFrame({'y3m':y3m.data},index=y3m.datenumbers)
y6mFrame= pd.DataFrame({'y6m':y6m.data},index=y6m.datenumbers)
y1Frame = pd.DataFrame({'y1':y1.data},index=y1.datenumbers)
y5Frame = pd.DataFrame({'y5':y5.data},index=y5.datenumbers)
y10Frame= pd.DataFrame({'y10':y10.data},index=y10.datenumbers)
y20Frame= pd.DataFrame({'y20':y20.data},index=y20.datenumbers)
y30Frame= pd.DataFrame({'y30':y30.data},index=y30.datenumbers)
df = pd.concat([y1mFrame,y3mFrame,y6mFrame,y1Frame,y5Frame,y10Frame,y20Frame,y30Frame],axis=1)
# Drop all days for which all rates are nan (national holidays, etc)
df = df.dropna(how='all')
df.head()
df.to_csv('data.csv')
# In[4]:
# Create a dictionary that records rate data available during specific date ranges
the_dict = {}
d0 = df.index[0]
intervalNumber = 1
for i,ind in enumerate(df.index):
criterion = collections.Counter(df.loc[ind].dropna().index) == collections.Counter(df.loc[d0].dropna().index)
if not criterion:
the_dict['interval '+str(intervalNumber)] = [d0,df.index[i-1],df.loc[d0].dropna().index]
d0 = ind
intervalNumber+=1
the_dict['interval '+str(intervalNumber)] = [d0,df.index[i],df.loc[d0].dropna().index]
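# Each entry of the_dict is [interval start, interval end, columns with data]: a
# maximal run of dates over which the same set of maturities is observed, so each
# run can later be drawn as one contiguous surface patch.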
# dictionary to link data frame column names to y-axis values
yaxis_dict = {'y1m':0, 'y3m':1, 'y6m':2, 'y1':3, 'y5':4, 'y10':5, 'y20':6, 'y30':7}
# In[5]:
# Set plot properties and define a date locator
font = {'weight' : 'bold',
'size' : 40}
rc('font', **font)
years1,years2,years5,years10= [mdates.YearLocator(tspace) for tspace in [1,2,5,10]]
# Create the figure
fig = plt.figure(figsize=(44,33))
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim([mdates.date2num(ex) for ex in df.index][0],[mdates.date2num(ex) for ex in df.index][-1])
ax.set_zlim([0,10])
# Iterate over the different date ranges
for key in the_dict.keys():
# Create the subset of the dataframe
start = the_dict[key][0]
end = the_dict[key][1]
cols = the_dict[key][2]
df_subset = df[cols].loc[start:end]
# Add extra columns to interpolate
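# nExtra nan-filled columns are inserted between each pair of observed maturities
# and filled by linear interpolation along axis=1, which smooths the surface in the
# maturity direction without altering the observed yields.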
nExtra=7
tempNumber =0
nColumns = len(df_subset.columns)
originalColumns = df_subset.columns
for j in range(nColumns-1):
for k in range(nExtra):
df_subset.insert(loc=nColumns-j-1,column='tmp '+str(tempNumber),value = np.nan*df_subset[df_subset.columns[0]])
tempNumber +=1
df_subset = df_subset.interpolate(axis=1)
# y-axis values
yAxisValues = []
for k,col in enumerate(originalColumns[:-1]):
yAxisValues.append(yaxis_dict[col])
for l in range(nExtra):
yAxisValues.append(yaxis_dict[col]+(l+1)*(yaxis_dict[originalColumns[k+1]]-yaxis_dict[originalColumns[k]])/(nExtra+1))
yAxisValues.append(yaxis_dict[originalColumns[-1]])
# Create the plot
x,y = np.meshgrid([mdates.date2num(ex) for ex in df_subset.index], yAxisValues)
z = np.transpose(df_subset.values)
ax.plot_surface(x, y,z,lw=0,rstride=1,cstride=1,norm=colors.Normalize(vmin=-3, vmax=z.max()),cmap=cm.Blues)
ax.plot(x[0], y[0],df_subset[df_subset.columns[0]],lw=4,color='k')
xtick_locator = mdates.AutoDateLocator()
xtick_formatter = mdates.AutoDateFormatter(xtick_locator)
ax.xaxis.set_major_locator(years2)
ax.xaxis.set_major_formatter(xtick_formatter)
ax.view_init(elev=35, azim=None)
ax.set_zlim([0,6])
ax.set_yticks([0,1,2,3,4,5,6,7])
ax.set_yticklabels(['1m','3m','6m','1y','5y','10y','20y','30y'])
ax.xaxis._axinfo['tick']['inward_factor'] = 0
ax.yaxis._axinfo['tick']['inward_factor'] = 0
ax.zaxis._axinfo['tick']['inward_factor'] = 0
[t.set_va('center') for t in ax.get_yticklabels()]
[t.set_ha('left') for t in ax.get_yticklabels()]
[t.set_va('center') for t in ax.get_xticklabels()]
[t.set_ha('right') for t in ax.get_xticklabels()]
[t.set_va('center') for t in ax.get_zticklabels()]
[t.set_ha('left') for t in ax.get_zticklabels()]
ax.set_xlabel('Date', labelpad=60.)
ax.set_ylabel('Time to maturity', labelpad=60.)
ax.set_zlabel('Percent', labelpad=45.)
fig.tight_layout()
plt.savefig('yield_curve_surface.png',bbox_inches='tight',dpi=120)
| 29.522989
| 130
| 0.687561
|
db9a9265a528295d1262373c8ea665c11701f58d
| 5,768
|
py
|
Python
|
tensorflow/python/data/kernel_tests/zip_test.py
|
ashutom/tensorflow-upstream
|
c16069c19de9e286dd664abb78d0ea421e9f32d4
|
[
"Apache-2.0"
] | 8
|
2021-08-03T03:57:10.000Z
|
2021-12-13T01:19:02.000Z
|
tensorflow/python/data/kernel_tests/zip_test.py
|
ashutom/tensorflow-upstream
|
c16069c19de9e286dd664abb78d0ea421e9f32d4
|
[
"Apache-2.0"
] | 17
|
2021-08-12T19:38:42.000Z
|
2022-01-27T14:39:35.000Z
|
tensorflow/python/data/kernel_tests/zip_test.py
|
ashutom/tensorflow-upstream
|
c16069c19de9e286dd664abb78d0ea421e9f32d4
|
[
"Apache-2.0"
] | 3
|
2021-09-26T22:20:25.000Z
|
2021-09-26T23:07:13.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.zip()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import test
try:
import attr # pylint:disable=g-import-not-at-top
except ImportError:
attr = None
def _dataset_factory(components):
datasets = tuple([
dataset_ops.Dataset.from_tensor_slices(component)
for component in components
])
return dataset_ops.Dataset.zip(datasets)
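# For example, _dataset_factory(([1, 2, 3], [4.0, 5.0, 6.0])) produces the element
# tuples (1, 4.0), (2, 5.0), (3, 6.0); like Python's zip, iteration stops with the
# shortest input (see testZipUnequal below).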
class ZipTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testZipEqual(self):
components = [
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 22),
np.array([37.0, 38.0, 39.0, 40.0])
]
get_next = self.getNext(_dataset_factory(components))
for i in range(4):
results = self.evaluate(get_next())
for component, result_component in zip(components, results):
self.assertAllEqual(component[i], result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testZipUnequal(self):
components = [[1, 2, 3, 4], [1, 2, 3, 4, 5], [1.0, 2.0]]
get_next = self.getNext(_dataset_factory(components))
for i in range(2):
results = self.evaluate(get_next())
for component, result_component in zip(components, results):
self.assertAllEqual(component[i], result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testNested(self):
components = [
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 22),
np.array([37.0, 38.0, 39.0, 40.0])
]
datasets = [
dataset_ops.Dataset.from_tensor_slices(component)
for component in components
]
dataset = dataset_ops.Dataset.zip((datasets[0], (datasets[1], datasets[2])))
self.assertEqual(
dataset_ops.get_legacy_output_shapes(dataset),
(tensor_shape.TensorShape([20]),
(tensor_shape.TensorShape([22]), tensor_shape.TensorShape([]))))
get_next = self.getNext(dataset)
for i in range(4):
result1, (result2, result3) = self.evaluate(get_next())
self.assertAllEqual(components[0][i], result1)
self.assertAllEqual(components[1][i], result2)
self.assertAllEqual(components[2][i], result3)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testNamedTuple(self):
Foo = collections.namedtuple("Foo", ["x", "y"])
x = Foo(x=dataset_ops.Dataset.range(3), y=dataset_ops.Dataset.range(3, 6))
dataset = dataset_ops.Dataset.zip(x)
expected = [Foo(x=0, y=3), Foo(x=1, y=4), Foo(x=2, y=5)]
self.assertDatasetProduces(dataset, expected)
@combinations.generate(test_base.default_test_combinations())
def testAttrs(self):
if attr is None:
self.skipTest("attr module is not available.")
@attr.s
class Foo(object):
x = attr.ib()
y = attr.ib()
x = Foo(x=dataset_ops.Dataset.range(3), y=dataset_ops.Dataset.range(3, 6))
dataset = dataset_ops.Dataset.zip(x)
expected = [Foo(x=0, y=3), Foo(x=1, y=4), Foo(x=2, y=5)]
self.assertDatasetProduces(dataset, expected)
class ZipCheckpointTest(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
def _build_dataset(self, arr):
components = [
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 22),
np.array(arr)
]
datasets = [
dataset_ops.Dataset.from_tensor_slices(component)
for component in components
]
return dataset_ops.Dataset.zip((datasets[0], (datasets[1], datasets[2])))
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(elements=[[37.0, 38.0, 39.0, 40.0], [1.0, 2.0]]))
)
def test(self, verify_fn, elements):
verify_fn(self, lambda: self._build_dataset(elements), len(elements))
if __name__ == "__main__":
test.main()
| 36.05
| 80
| 0.682732
|
e84c616d690a210b314e314a0e2b6a856b4bd364
| 2,813
|
py
|
Python
|
setup.py
|
sjiekak/cvxpy
|
4c16ee865d7721160fe9bd41d432f95ee6fe80f8
|
[
"ECL-2.0",
"Apache-2.0"
] | 7
|
2015-06-03T01:33:46.000Z
|
2021-11-15T01:48:49.000Z
|
setup.py
|
zhm-real/cvxpy
|
370bdb24849014bf2fbd24980eaf98a133455b60
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-10-22T07:46:38.000Z
|
2020-10-22T07:46:38.000Z
|
setup.py
|
sjiekak/cvxpy
|
4c16ee865d7721160fe9bd41d432f95ee6fe80f8
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2020-10-22T01:35:58.000Z
|
2022-01-19T10:48:51.000Z
|
import distutils.sysconfig
import distutils.version
import os
import platform
import sys
from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext
# inject numpy headers
class build_ext_cvxpy(build_ext):
def finalize_options(self):
build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
# `__builtins__` can be a dict
# see https://docs.python.org/2/reference/executionmodel.html
if isinstance(__builtins__, dict):
__builtins__['__NUMPY_SETUP__'] = False
else:
__builtins__.__NUMPY_SETUP__ = False
import numpy
self.include_dirs.append(numpy.get_include())
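# Deferring the numpy import to finalize_options lets setup_requires install numpy
# first, so its header directory is available when the C++ extension is compiled
# even in an environment where numpy was not pre-installed.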
def is_platform_mac():
return sys.platform == 'darwin'
# For mac, ensure extensions are built for macos 10.9 when compiling on a
# 10.9 system or above, overriding distutils behaviour which is to target
# the version that python was built for. This may be overridden by setting
# MACOSX_DEPLOYMENT_TARGET before calling setup.py. This behavior is
# motivated by Apple dropping support for libstdc++.
if is_platform_mac():
if 'MACOSX_DEPLOYMENT_TARGET' not in os.environ:
current_system = distutils.version.LooseVersion(platform.mac_ver()[0])
python_target = distutils.version.LooseVersion(
distutils.sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET'))
if python_target < '10.9' and current_system >= '10.9':
os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.9'
canon = Extension(
'_cvxcore',
sources=['cvxpy/cvxcore/src/cvxcore.cpp',
'cvxpy/cvxcore/src/LinOpOperations.cpp',
'cvxpy/cvxcore/src/Utils.cpp',
'cvxpy/cvxcore/python/cvxcore_wrap.cpp'],
include_dirs=['cvxpy/cvxcore/src/',
'cvxpy/cvxcore/python/',
'cvxpy/cvxcore/include/Eigen'],
extra_compile_args=['-O3'],
)
setup(
name='cvxpy',
version='1.1.3',
author='Steven Diamond, Eric Chu, Stephen Boyd',
author_email='stevend2@stanford.edu, akshayka@cs.stanford.edu, '
'echu508@stanford.edu, boyd@stanford.edu',
cmdclass={'build_ext': build_ext_cvxpy},
ext_modules=[canon],
packages=find_packages(exclude=["cvxpy.performance_tests"]),
url='http://github.com/cvxgrp/cvxpy/',
license='Apache License, Version 2.0',
zip_safe=False,
description='A domain-specific language for modeling convex optimization '
'problems in Python.',
python_requires='>=3.5',
install_requires=["osqp >= 0.4.1",
"ecos >= 2",
"scs >= 1.1.3",
"numpy >= 1.15",
"scipy >= 1.1.0"],
setup_requires=["numpy >= 1.15"],
)
| 35.607595
| 78
| 0.65695
|
1824c1694c81443e2f4b7f317de0dc166d1d52d5
| 4,635
|
py
|
Python
|
openstates/openstates-master/openstates/ri/events.py
|
Jgorsick/Advocacy_Angular
|
8906af3ba729b2303880f319d52bce0d6595764c
|
[
"CC-BY-4.0"
] | null | null | null |
openstates/openstates-master/openstates/ri/events.py
|
Jgorsick/Advocacy_Angular
|
8906af3ba729b2303880f319d52bce0d6595764c
|
[
"CC-BY-4.0"
] | null | null | null |
openstates/openstates-master/openstates/ri/events.py
|
Jgorsick/Advocacy_Angular
|
8906af3ba729b2303880f319d52bce0d6595764c
|
[
"CC-BY-4.0"
] | null | null | null |
import datetime as dt
from billy.scrape import NoDataForPeriod
from billy.scrape.events import Event, EventScraper
from openstates.utils import LXMLMixin
import lxml.html
import pytz
agenda_url = "http://status.rilin.state.ri.us/agendas.aspx"
column_order = {
"upper" : 1,
"other" : 2,
"lower" : 0
}
all_day = [  # entries listed without a clock time; treated as all-day events
"Rise of the House",
"Rise of the Senate",
"Rise of the House & Senate"
]
replace = {
"House Joint Resolution No." : "HJR",
"House Resolution No." : "HR",
"House Bill No." : "HB",
"Senate Joint Resolution No." : "SJR",
"Senate Resolution No." : "SR",
"Senate Bill No." : "SB",
u"\xa0" : " ",
"SUB A" : "",
"SUB A as amended": ""
}
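# `replace` normalises the long bill titles that appear on agenda pages down to the
# short identifiers (HB, SB, HJR, ...) attached to the event via add_related_bill;
# it also maps a non-breaking space to a regular space and removes "SUB A" suffixes.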
class RIEventScraper(EventScraper, LXMLMixin):
jurisdiction = 'ri'
_tz = pytz.timezone('US/Eastern')
def scrape_agenda(self, url, session):
page = self.lxmlize(url)
# Get the date/time info:
date_time = page.xpath("//table[@class='time_place']")
if date_time == []:
return
date_time = date_time[0]
lines = date_time.xpath("./tr")
metainf = {}
for line in lines:
tds = line.xpath("./td")
metainf[tds[0].text_content()] = tds[1].text_content()
date = metainf['DATE:']
time = metainf['TIME:']
where = metainf['PLACE:']
fmts = [
"%A, %B %d, %Y",
"%A, %B %d, %Y %I:%M %p",
"%A, %B %d, %Y %I:%M",
]
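# Several date formats are tried in turn (see the strptime loop below) because
# agenda pages are inconsistent about whether a clock time is listed.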
kwargs = {}
if time in all_day:
datetime = date
else:
datetime = "%s %s" % ( date, time )
if "CANCELLED" in datetime.upper():
return
event_desc = "Meeting Notice"
if "Rise of the" in datetime:
datetime = date
kwargs["all_day"] = True
event_desc = "Meeting Notice: Starting at {}".format(time)
transtable = {
"P.M" : "PM",
"PM." : "PM",
"P.M." : "PM",
"A.M." : "AM",
"POSTPONED" : "",
"RESCHEDULED": "",
"and Rise of the Senate": "",
}
for trans in transtable:
datetime = datetime.replace(trans, transtable[trans])
datetime = datetime.strip()
for fmt in fmts:
try:
datetime = dt.datetime.strptime(datetime, fmt)
break
except ValueError:
continue
event = Event(session, datetime, 'committee:meeting',
event_desc, location=where, **kwargs)
event.add_source(url)
# Collect the bills listed on the agenda and attach them to the event.
bills = page.xpath("//b/a")
for bill in bills:
bill_ft = bill.attrib['href']
event.add_document(bill.text_content(), bill_ft, type="full-text",
mimetype="application/pdf")
root = bill.xpath('../../*')
root = [ x.text_content() for x in root ]
bill_id = "".join(root)
if "SCHEDULED FOR" in bill_id:
continue
descr = bill.getparent().getparent().getparent().getnext().getnext(
).text_content()
for thing in replace:
bill_id = bill_id.replace(thing, replace[thing])
event.add_related_bill(bill_id,
description=descr,
type='consideration')
committee = page.xpath("//span[@id='lblSession']")[0].text_content()
chambers = {
"house" : "lower",
"joint" : "joint",
"senate" : "upper"
}
chamber = "other"
for key in chambers:
if key in committee.lower():
chamber = chambers[key]
event.add_participant("host", committee, 'committee', chamber=chamber)
self.save_event(event)
def scrape_agenda_dir(self, url, session):
page = self.lxmlize(url)
rows = page.xpath("//table[@class='agenda_table']/tr")[2:]
for row in rows:
url = row.xpath("./td")[-1].xpath(".//a")[0]
self.scrape_agenda(url.attrib['href'], session)
def scrape(self, chamber, session):
offset = column_order[chamber]
page = self.lxmlize(agenda_url)
rows = page.xpath("//table[@class='agenda_table']/tr")[1:]
for row in rows:
ctty = row.xpath("./td")[offset]
to_scrape = ctty.xpath("./a")
for page in to_scrape:
self.scrape_agenda_dir(page.attrib['href'], session)
| 30.097403
| 79
| 0.510464
|