| hexsha (string, len 40) | size (int64, 7 to 1.04M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, len 4 to 247) | max_stars_repo_name (string, len 4 to 125) | max_stars_repo_head_hexsha (string, len 40 to 78) | max_stars_repo_licenses (list, len 1 to 10) | max_stars_count (int64, 1 to 368k, nullable) | max_stars_repo_stars_event_min_datetime (string, len 24, nullable) | max_stars_repo_stars_event_max_datetime (string, len 24, nullable) | max_issues_repo_path (string, len 4 to 247) | max_issues_repo_name (string, len 4 to 125) | max_issues_repo_head_hexsha (string, len 40 to 78) | max_issues_repo_licenses (list, len 1 to 10) | max_issues_count (int64, 1 to 116k, nullable) | max_issues_repo_issues_event_min_datetime (string, len 24, nullable) | max_issues_repo_issues_event_max_datetime (string, len 24, nullable) | max_forks_repo_path (string, len 4 to 247) | max_forks_repo_name (string, len 4 to 125) | max_forks_repo_head_hexsha (string, len 40 to 78) | max_forks_repo_licenses (list, len 1 to 10) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, len 24, nullable) | max_forks_repo_forks_event_max_datetime (string, len 24, nullable) | content (string, len 1 to 1.04M) | avg_line_length (float64, 1.77 to 618k) | max_line_length (int64, 1 to 1.02M) | alphanum_fraction (float64, 0 to 1) | original_content (string, len 7 to 1.04M) | filtered:remove_function_no_docstring (int64, -102 to 942k) | filtered:remove_class_no_docstring (int64, -354 to 977k) | filtered:remove_delete_markers (int64, 0 to 60.1k) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
09f68ac9516db807bd112b0783e36067ee23def6
| 373
|
py
|
Python
|
Algorithms/Root Algorithms/hash.py
|
Ahmad-Fahad/Python
|
5a5f8f3395f7085947430b8309f6af70b2e25a77
|
[
"Apache-2.0"
] | null | null | null |
Algorithms/Root Algorithms/hash.py
|
Ahmad-Fahad/Python
|
5a5f8f3395f7085947430b8309f6af70b2e25a77
|
[
"Apache-2.0"
] | null | null | null |
Algorithms/Root Algorithms/hash.py
|
Ahmad-Fahad/Python
|
5a5f8f3395f7085947430b8309f6af70b2e25a77
|
[
"Apache-2.0"
] | null | null | null |
intgr = 11
flt = 11.11
stng = "Eleven"
tple = (1,2)
lst = [1,2,3,4,5]
h_i = hash(intgr)
h_f = hash(flt)
h_s = hash(stng)
h_t = hash(tple)
#h_l = hash(lst)
print("hash of {} is {} ".format(intgr,h_i))
print("hash of {} is {} ".format(flt,h_f))
print("hash of {} is {} ".format(stng,h_s))
print("hash of {} is {} ".format(tple,h_t))
# print(h_l)  # lists can't be hashed
| 21.941176
| 44
| 0.581769
|
intgr = 11
flt = 11.11
stng = "Eleven"
tple = (1,2)
lst = [1,2,3,4,5]
h_i = hash(intgr)
h_f = hash(flt)
h_s = hash(stng)
h_t = hash(tple)
#h_l = hash(lst)
print("hash of {} is {} ".format(intgr,h_i))
print("hash of {} is {} ".format(flt,h_f))
print("hash of {} is {} ".format(stng,h_s))
print("hash of {} is {} ".format(tple,h_t))
# print(h_l)  # lists can't be hashed
| 0
| 0
| 0
|
43491ee16935885c90b9bd5ca040626a1f97ff54
| 876
|
py
|
Python
|
examples/line_performance.py
|
brisvag/pygfx
|
02e1f5ff92f50899ca990b712016e10d2914fcaf
|
[
"BSD-2-Clause"
] | 60
|
2020-06-20T09:25:37.000Z
|
2022-03-31T23:06:30.000Z
|
examples/line_performance.py
|
brisvag/pygfx
|
02e1f5ff92f50899ca990b712016e10d2914fcaf
|
[
"BSD-2-Clause"
] | 190
|
2020-06-17T09:11:43.000Z
|
2022-03-31T17:42:19.000Z
|
examples/line_performance.py
|
brisvag/pygfx
|
02e1f5ff92f50899ca990b712016e10d2914fcaf
|
[
"BSD-2-Clause"
] | 2
|
2021-03-04T08:19:14.000Z
|
2022-01-02T16:31:15.000Z
|
"""
Display a line depicting a noisy signal consisting of a lot of points.
"""
import numpy as np
import pygfx as gfx
from PySide6 import QtWidgets
from wgpu.gui.qt import WgpuCanvas
app = QtWidgets.QApplication([])
canvas = WgpuCanvas()
renderer = gfx.WgpuRenderer(canvas)
scene = gfx.Scene()
# todo: crank this to 1M when wgpu allows it :D
x = np.linspace(0, 100, 10_000, dtype=np.float32)
y = np.sin(x) * 30 + np.random.normal(0, 5, len(x)).astype(np.float32)
positions = np.column_stack([x, y, np.zeros_like(x)])
geometry = gfx.Geometry(positions=positions)
material = gfx.LineMaterial(thickness=2.0, color=(0.0, 0.7, 0.3, 1.0))
line = gfx.Line(geometry, material)
scene.add(line)
camera = gfx.OrthographicCamera(110, 110)
camera.position.set(50, 0, 0)
if __name__ == "__main__":
canvas.request_draw(lambda: renderer.render(scene, camera))
app.exec()
| 23.052632
| 70
| 0.715753
|
"""
Display a line depicting a noisy signal consisting of a lot of points.
"""
import numpy as np
import pygfx as gfx
from PySide6 import QtWidgets
from wgpu.gui.qt import WgpuCanvas
app = QtWidgets.QApplication([])
canvas = WgpuCanvas()
renderer = gfx.WgpuRenderer(canvas)
scene = gfx.Scene()
# todo: crank this to 1M when wgpu allows it :D
x = np.linspace(0, 100, 10_000, dtype=np.float32)
y = np.sin(x) * 30 + np.random.normal(0, 5, len(x)).astype(np.float32)
positions = np.column_stack([x, y, np.zeros_like(x)])
geometry = gfx.Geometry(positions=positions)
material = gfx.LineMaterial(thickness=2.0, color=(0.0, 0.7, 0.3, 1.0))
line = gfx.Line(geometry, material)
scene.add(line)
camera = gfx.OrthographicCamera(110, 110)
camera.position.set(50, 0, 0)
if __name__ == "__main__":
canvas.request_draw(lambda: renderer.render(scene, camera))
app.exec()
| 0
| 0
| 0
|
c9e4f85d31f172455a07017ac64f346ecb9cacb0
| 275
|
py
|
Python
|
rocket_erp/apps/api/serializers/accounts.py
|
dimmy2000/rocket-erp
|
3accc0f1a1fa4faafd9165064c8d20abbb745324
|
[
"MIT"
] | null | null | null |
rocket_erp/apps/api/serializers/accounts.py
|
dimmy2000/rocket-erp
|
3accc0f1a1fa4faafd9165064c8d20abbb745324
|
[
"MIT"
] | null | null | null |
rocket_erp/apps/api/serializers/accounts.py
|
dimmy2000/rocket-erp
|
3accc0f1a1fa4faafd9165064c8d20abbb745324
|
[
"MIT"
] | 2
|
2021-06-15T14:37:36.000Z
|
2021-06-15T14:39:24.000Z
|
from rest_framework.serializers import ModelSerializer
from rocket_erp.apps.accounts.models import Account
class AccountSerializer(ModelSerializer):
"""Serializer for account app, with all fields."""
| 22.916667
| 54
| 0.734545
|
from rest_framework.serializers import ModelSerializer
from rocket_erp.apps.accounts.models import Account
class AccountSerializer(ModelSerializer):
"""Serializer for account app, with all fields."""
class Meta:
model = Account
fields = "__all__"
| 0
| 41
| 27
|
4c6963d54922e9ad237b55a1a506357629e1b468
| 5,866
|
py
|
Python
|
tests/fit_param.py
|
harmslab/likelihood
|
3b6864631548c6fe989d3729fc150c76eac8928e
|
[
"MIT"
] | 3
|
2020-04-30T21:55:49.000Z
|
2020-04-30T22:14:42.000Z
|
tests/fit_param.py
|
harmslab/likelihood
|
3b6864631548c6fe989d3729fc150c76eac8928e
|
[
"MIT"
] | null | null | null |
tests/fit_param.py
|
harmslab/likelihood
|
3b6864631548c6fe989d3729fc150c76eac8928e
|
[
"MIT"
] | 1
|
2020-04-30T21:55:08.000Z
|
2020-04-30T21:55:08.000Z
|
__description__ = \
"""
Main class for holding fit parameters, including guesses, values, ranges, etc.
"""
__date__ = "2016-09-02"
__author__ = "Michael J. Harms"
import copy
import numpy as np
class FitParameter:
"""
Class for storing and manipulating generic fit parameters.
"""
def __init__(self,name,guess=None,fixed=False,bounds=None,alias=None):
"""
Initialize class. Parameters:
name: name of parameter (string)
guess: parameter guess (float). If None, class will guess intelligently
based on the parameter name. If no intelligent guess is available,
guess will be set to 1.0.
fixed: whether or not the parameter is fixed (bool)
bounds: bounds on fit for parameter (list-like object of 2 floats). If
                None, bounds will be set to (-np.inf,np.inf). If (None,5), no lower
bound, upper bound of 5.
        alias: alias for parameter name, for linking to global parameter names. (str)
If None, no alias is made.
"""
self.name = name
self.guess = guess
self.fixed = fixed
self.bounds = bounds
self.alias = alias
#--------------------------------------------------------------------------
# parameter name
@property
def name(self):
"""
Name of the parameter.
"""
try:
return self._name
except AttributeError:
return None
#--------------------------------------------------------------------------
# parameter value
@property
def value(self):
"""
Value of the parameter.
"""
try:
return self._value
except AttributeError:
return None
@value.setter
def value(self,value=None):
"""
If value is set to None, set value to self.guess value.
"""
if value is None:
self._value = self.guess
else:
try:
                value = float(value)
            except ValueError:
                err = f"parameter value '{value}' cannot be interpreted as a float\n"
raise ValueError(err)
self._value = value
#--------------------------------------------------------------------------
# parameter stdev
@property
def stdev(self):
"""
Standard deviation on the parameter.
"""
return self._stdev
@stdev.setter
def stdev(self,s):
"""
Set the standard deviation of the parameter.
"""
self._stdev = s
#--------------------------------------------------------------------------
# parameter 95% confidence
@property
def ninetyfive(self):
"""
95% confidence interval on the parameter.
"""
return self._ninetyfive
@ninetyfive.setter
def ninetyfive(self,value):
"""
Set the 95% confidence interval on the parameter.
"""
if len(value) != 2:
err = "ninetyfive requires a list-like with length 2.\n"
raise ValueError(err)
self._ninetyfive[0] = value[0]
self._ninetyfive[1] = value[1]
#--------------------------------------------------------------------------
# parameter guess
@property
def guess(self):
"""
Guess for the parameter.
"""
return self._guess
@guess.setter
def guess(self,g):
"""
Set the guess. If None, choose intelligently based on the name of the
parameter.
"""
        if g is not None:
self._guess = g
else:
if self.name.startswith("dH"):
self._guess = 1000.0
elif self.name.startswith("beta") or self.name.startswith("K"):
self._guess = 1e6
elif self.name.startswith("fx"):
self._guess = 1.0
else:
self._guess = 1.0
self._value = self._guess
#--------------------------------------------------------------------------
# parameter fixed-ness.
@property
def fixed(self):
"""
        Whether or not the parameter is fixed.
"""
return self._fixed
@fixed.setter
def fixed(self,bool_value):
"""
Fix or unfix the parameter.
"""
self._fixed = bool(bool_value)
self._initialize_fit_results()
#--------------------------------------------------------------------------
# bounds for fit.
@property
def bounds(self):
"""
Fit bounds. Either list of bounds or None.
"""
return self._bounds
@bounds.setter
def bounds(self,b):
"""
Set fit bounds.
"""
        if b is not None:
try:
if len(b) != 2:
raise TypeError
except TypeError:
err = "Bounds must be list-like object of length 2\n"
raise ValueError(err)
self._bounds = tuple(copy.deepcopy(b))
else:
self._bounds = (-np.inf,np.inf)
#--------------------------------------------------------------------------
# parameter alias
@property
def alias(self):
"""
Parameter alias. Either string or None.
"""
return self._alias
@alias.setter
def alias(self,a):
"""
Set alias.
"""
try:
            if self._alias is not None and self._alias != a and a is not None:
err = "Could not set alias to {:} because it is already set to {:}".format(a,self._alias)
raise ValueError(err)
except AttributeError:
pass
self._alias = a
| 24.647059
| 105
| 0.468121
|
__description__ = \
"""
Main class for holding fit parameters, including guesses, values, ranges, etc.
"""
__date__ = "2016-09-02"
__author__ = "Michael J. Harms"
import copy
import numpy as np
class FitParameter:
"""
Class for storing and manipulating generic fit parameters.
"""
def __init__(self,name,guess=None,fixed=False,bounds=None,alias=None):
"""
Initialize class. Parameters:
name: name of parameter (string)
guess: parameter guess (float). If None, class will guess intelligently
based on the parameter name. If no intelligent guess is available,
guess will be set to 1.0.
fixed: whether or not the parameter is fixed (bool)
bounds: bounds on fit for parameter (list-like object of 2 floats). If
                None, bounds will be set to (-np.inf,np.inf). If (None,5), no lower
bound, upper bound of 5.
        alias: alias for parameter name, for linking to global parameter names. (str)
If None, no alias is made.
"""
self.name = name
self.guess = guess
self.fixed = fixed
self.bounds = bounds
self.alias = alias
#--------------------------------------------------------------------------
# parameter name
@property
def name(self):
"""
Name of the parameter.
"""
try:
return self._name
except AttributeError:
return None
    @name.setter
    def name(self,name):
        self._name = str(name)
#--------------------------------------------------------------------------
# parameter value
@property
def value(self):
"""
Value of the parameter.
"""
try:
return self._value
except AttributeError:
return None
@value.setter
def value(self,value=None):
"""
If value is set to None, set value to self.guess value.
"""
if value is None:
self._value = self.guess
else:
try:
                value = float(value)
            except ValueError:
                err = f"parameter value '{value}' cannot be interpreted as a float\n"
raise ValueError(err)
self._value = value
#--------------------------------------------------------------------------
# parameter stdev
@property
def stdev(self):
"""
Standard deviation on the parameter.
"""
return self._stdev
@stdev.setter
def stdev(self,s):
"""
Set the standard deviation of the parameter.
"""
self._stdev = s
#--------------------------------------------------------------------------
# parameter 95% confidence
@property
def ninetyfive(self):
"""
95% confidence interval on the parameter.
"""
return self._ninetyfive
@ninetyfive.setter
def ninetyfive(self,value):
"""
Set the 95% confidence interval on the parameter.
"""
if len(value) != 2:
err = "ninetyfive requires a list-like with length 2.\n"
raise ValueError(err)
self._ninetyfive[0] = value[0]
self._ninetyfive[1] = value[1]
#--------------------------------------------------------------------------
# parameter guess
@property
def guess(self):
"""
Guess for the parameter.
"""
return self._guess
@guess.setter
def guess(self,g):
"""
Set the guess. If None, choose intelligently based on the name of the
parameter.
"""
        if g is not None:
self._guess = g
else:
if self.name.startswith("dH"):
self._guess = 1000.0
elif self.name.startswith("beta") or self.name.startswith("K"):
self._guess = 1e6
elif self.name.startswith("fx"):
self._guess = 1.0
else:
self._guess = 1.0
self._value = self._guess
#--------------------------------------------------------------------------
# parameter fixed-ness.
@property
def fixed(self):
"""
        Whether or not the parameter is fixed.
"""
return self._fixed
@fixed.setter
def fixed(self,bool_value):
"""
Fix or unfix the parameter.
"""
self._fixed = bool(bool_value)
self._initialize_fit_results()
#--------------------------------------------------------------------------
# bounds for fit.
@property
def bounds(self):
"""
Fit bounds. Either list of bounds or None.
"""
return self._bounds
@bounds.setter
def bounds(self,b):
"""
Set fit bounds.
"""
        if b is not None:
try:
if len(b) != 2:
raise TypeError
except TypeError:
err = "Bounds must be list-like object of length 2\n"
raise ValueError(err)
self._bounds = tuple(copy.deepcopy(b))
else:
self._bounds = (-np.inf,np.inf)
#--------------------------------------------------------------------------
# parameter alias
@property
def alias(self):
"""
Parameter alias. Either string or None.
"""
return self._alias
@alias.setter
def alias(self,a):
"""
Set alias.
"""
try:
            if self._alias is not None and self._alias != a and a is not None:
err = "Could not set alias to {:} because it is already set to {:}".format(a,self._alias)
raise ValueError(err)
except AttributeError:
pass
self._alias = a
| 31
| 0
| 27
|
bde9bf927b8b96816899007c7fd3f79cd619b67c
| 12,907
|
py
|
Python
|
gridworld/agents/buildings/five_zone_rom_env.py
|
NREL/PowerGridworld
|
2f72ac5bb663092ca806c6fff9c7cf70f94fd775
|
[
"BSD-3-Clause"
] | 24
|
2021-11-12T03:42:38.000Z
|
2022-02-27T17:22:30.000Z
|
gridworld/agents/buildings/five_zone_rom_env.py
|
NREL/PowerGridworld
|
2f72ac5bb663092ca806c6fff9c7cf70f94fd775
|
[
"BSD-3-Clause"
] | 4
|
2021-11-11T03:27:58.000Z
|
2021-11-15T23:12:05.000Z
|
gridworld/agents/buildings/five_zone_rom_env.py
|
NREL/PowerGridworld
|
2f72ac5bb663092ca806c6fff9c7cf70f94fd775
|
[
"BSD-3-Clause"
] | 2
|
2022-02-09T09:15:41.000Z
|
2022-02-24T14:56:40.000Z
|
from abc import abstractmethod
from collections import OrderedDict
import os
import pickle
import re
from typing import Tuple, Union
import pandas as pd
import numpy as np
import gym
from gridworld.log import logger
from gridworld import ComponentEnv
from gridworld.utils import to_scaled, to_raw, maybe_rescale_box_space
from gridworld.agents.buildings.obs_space import make_obs_space
from gridworld.agents.buildings import defaults
from gridworld.agents.buildings import five_zone_rom_dynamics as dyn
# Below are control variables' boundary.
MAX_FLOW_RATE = [2.2, 2.2, 2.2, 2.2, 3.2] # Max flow rate for each individual zone
MIN_FLOW_RATE = [.22, .22, .22, .22, .32] # Min flow rate for each individual zone
MAX_TOTAL_FLOW_RATE = 10.0 # Total flow rate for all zones should be lower than 10 kg/sec.
MAX_DISCHARGE_TEMP = 16.0 # Max temp of air leaving chiller
MIN_DISCHARGE_TEMP = 10.0 # Min temp of air leaving chiller
DEFAULT_COMFORT_BOUNDS = (22., 28.) # Temps between these values are considered "comfortable"
def load_data(start_time: str = None, end_time: str = None) -> Tuple[pd.DataFrame, dict]:
"""Returns exogenous data dataframe, and state space model (per-zone) dict."""
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
df = pd.read_csv(os.path.join(THIS_DIR, "data/exogenous_data.csv"), index_col=0)
df.index = pd.DatetimeIndex(df.index)
start_time = pd.Timestamp(start_time) if start_time else df.index[0]
end_time = pd.Timestamp(end_time) if end_time else df.index[-1]
_df = df.loc[start_time:end_time]
if _df is None or len(_df) == 0:
raise ValueError(
f"start and/or end times ({start_time}, {end_time}) " +
"resulted in empty dataframe. First and last indices are " +
f"({df.index[0]}, {df.index[-1]}), choose values in this range.")
with open(os.path.join(THIS_DIR, "data/state_space_model.p"), "rb") as f:
models = pickle.load(f)
return _df, models
def get_col(df, pattern, index=None):
"""Returns a dataframe with columns matching regex pattern."""
return df[[c for c in df.columns if re.match(pattern, c)]].values
class FiveZoneROMThermalEnergyEnv(FiveZoneROMEnv):
"""Subclass with identical physics, but that balances energy and comfort costs."""
def step_reward(self) -> Tuple[float, dict]:
"""Overwriting reward to balance energy and comfort."""
alpha = 0.2
energy_consumption_reward = -self.state["p_consumed"] / 12.0
comfort_error = [
max(self.state["zone_upper_viol_{}".format(i)], self.state["zone_lower_viol_{}".format(i)], 0.0)
for i in range(self.num_zones)
]
comfort_reward = -(sum([x**2 for x in comfort_error]))
reward = alpha * energy_consumption_reward * 0.5 + (1. - alpha) * comfort_reward
meta = {
"comfort_rew": comfort_reward,
"energy_rew": energy_consumption_reward
}
return reward, meta
if __name__ == '__main__':
env = FiveZoneROMThermalEnergyEnv()
obs = env.reset()
print(obs)
| 37.520349
| 108
| 0.643837
|
from abc import abstractmethod
from collections import OrderedDict
import os
import pickle
import re
from typing import Tuple, Union
import pandas as pd
import numpy as np
import gym
from gridworld.log import logger
from gridworld import ComponentEnv
from gridworld.utils import to_scaled, to_raw, maybe_rescale_box_space
from gridworld.agents.buildings.obs_space import make_obs_space
from gridworld.agents.buildings import defaults
from gridworld.agents.buildings import five_zone_rom_dynamics as dyn
# Below are control variables' boundary.
MAX_FLOW_RATE = [2.2, 2.2, 2.2, 2.2, 3.2] # Max flow rate for each individual zone
MIN_FLOW_RATE = [.22, .22, .22, .22, .32] # Min flow rate for each individual zone
MAX_TOTAL_FLOW_RATE = 10.0 # Total flow rate for all zones should be lower than 10 kg/sec.
MAX_DISCHARGE_TEMP = 16.0 # Max temp of air leaving chiller
MIN_DISCHARGE_TEMP = 10.0 # Min temp of air leaving chiller
DEFAULT_COMFORT_BOUNDS = (22., 28.) # Temps between these values are considered "comfortable"
def load_data(start_time: str = None, end_time: str = None) -> Tuple[pd.DataFrame, dict]:
"""Returns exogenous data dataframe, and state space model (per-zone) dict."""
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
df = pd.read_csv(os.path.join(THIS_DIR, "data/exogenous_data.csv"), index_col=0)
df.index = pd.DatetimeIndex(df.index)
start_time = pd.Timestamp(start_time) if start_time else df.index[0]
end_time = pd.Timestamp(end_time) if end_time else df.index[-1]
_df = df.loc[start_time:end_time]
if _df is None or len(_df) == 0:
raise ValueError(
f"start and/or end times ({start_time}, {end_time}) " +
"resulted in empty dataframe. First and last indices are " +
f"({df.index[0]}, {df.index[-1]}), choose values in this range.")
with open(os.path.join(THIS_DIR, "data/state_space_model.p"), "rb") as f:
models = pickle.load(f)
return _df, models
def get_col(df, pattern, index=None):
"""Returns a dataframe with columns matching regex pattern."""
return df[[c for c in df.columns if re.match(pattern, c)]].values
class FiveZoneROMEnv(ComponentEnv):
time: pd.Timestamp = None
time_index: int = None
raw_action: np.ndarray = None
state: OrderedDict = None
def __init__(
self,
name: str = None,
obs_config: dict = None,
start_time: Union[str, pd.Timestamp] = None,
end_time: Union[str, pd.Timestamp] = None,
comfort_bounds: Union[tuple, np.ndarray, pd.DataFrame] = None,
zone_temp_init: np.ndarray = None,
max_episode_steps: int = None,
rescale_spaces: bool = True,
**kwargs
):
super().__init__(name=name)
self.rescale_spaces = rescale_spaces
self.num_zones = 5
self.obs_config = obs_config if obs_config is not None else defaults.obs_config
# Set the initial zone temperature profile.
if zone_temp_init is not None:
self.zone_temp_init = zone_temp_init.copy()
else:
self.zone_temp_init = 27. * np.ones(self.num_zones, dtype=np.float64)
# Load exogenous and model data.
self.df, self.models = load_data(start_time, end_time)
# Configure max episode steps.
max_steps = self.df.shape[0] - 3 # due to filter update
if max_episode_steps is None:
self.max_episode_steps = max_steps
else:
self.max_episode_steps = min(max_episode_steps, max_steps)
# The default range on comfort bounds are (lowest of low, highest of high)
self.comfort_bounds = comfort_bounds if comfort_bounds is not None \
else DEFAULT_COMFORT_BOUNDS
# Action space: [zone_flows] + [discharge temp]
self.act_low = np.array(MIN_FLOW_RATE + [MIN_DISCHARGE_TEMP])
self.act_high = np.array(MAX_FLOW_RATE + [MAX_DISCHARGE_TEMP])
self._action_space = gym.spaces.Box(
low=self.act_low,
high=self.act_high,
dtype=np.float64
)
self.action_space = maybe_rescale_box_space(
self._action_space, rescale=self.rescale_spaces)
# State space is configured via obs_config.
self.comfort_bounds_df = self.make_comfort_bounds_df()
self._observation_space, self._obs_labels = make_obs_space(
self.num_zones, self.obs_config)
self.observation_space = maybe_rescale_box_space(
self._observation_space, rescale=self.rescale_spaces)
def make_comfort_bounds_df(self) -> pd.DataFrame:
"""Returns a dataframe containing upper and lower comfort bounds on the
zone temperatures."""
data = np.zeros((self.df.shape[0], 2))
if isinstance(self.comfort_bounds, tuple):
data[:, 0], data[:, 1] = self.comfort_bounds[0], self.comfort_bounds[1]
else:
data[:, 0] = self.comfort_bounds[:data.shape[0], 0]
data[:, 1] = self.comfort_bounds[:data.shape[0], 1]
return pd.DataFrame(data, columns=["temp_lb", "temp_ub"], index=self.df.index)
def _set_exogenous(self):
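        # Pull the exogenous inputs for the current time index: outdoor temperature
        # plus the solar, per-zone cooling, and internal loads from the dataframe.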
self.temp_oa = get_col(self.df, "T_oa")[self.time_index][0]
self.q_solar = get_col(self.df, "Q_solar")[self.time_index]
self.q_cool = get_col(self.df, "Q_cool_", )[self.time_index, :]
self.q_int = get_col(self.df, "Q_int")[self.time_index]
def reset(self, **obs_kwargs) -> np.ndarray:
"""Resets the environment to the initial state and returns this state."""
self.time_index = 0
self.time = self.df.index[self.time_index]
self.state = None
# Set initial state values and exogenous data.
self.zone_temp = self.zone_temp_init.copy()
self._set_exogenous()
self.p_consumed = 0.
# Build the u-vector given current state and exogenous data.
self.u = dyn.build_u_vector(
self.models,
zone_temp=self.zone_temp,
action=None,
temp_oa=self.temp_oa,
q_solar=self.q_solar,
q_int=self.q_int,
q_cool=self.q_cool
)
# Filter update x2.
for _ in range(2):
self.models = dyn.filter_update(
self.models, self.zone_temp, self.u)
# Update the zone temperatures based on the filter update.
self.zone_temp = dyn.temp_dynamics(self.models)
obs, _ = self.get_obs(**obs_kwargs)
return obs
def step(self, action: np.ndarray, **obs_kwargs) -> Tuple[np.ndarray, float, bool, dict]:
if self.rescale_spaces:
action = to_raw(action, self._action_space.low, self._action_space.high)
return self.step_(action, **obs_kwargs)
def step_(
self,
action: np.ndarray,
**obs_kwargs
) -> Tuple[np.ndarray, float, bool, dict]:
"""Applies the action to the system and takes a time step. Returns
the new state, stage reward, boolean to indicate whether state is terminal,
and dictionary of any desired metadata. In some settings, the p setpoint
will be updated exogenously."""
action = np.array(action).squeeze()
self.raw_action = action
# Advance the dynamics and update the model and state variables.
self.model, self.zone_temp = dyn.dynamics(
self.models,
self.zone_temp,
action,
self.temp_oa,
self.q_solar,
self.q_int
)
self.p_consumed = dyn.get_p_consumed(action, self.temp_oa)
# Get the reward
rew, _ = self.step_reward()
# Step in time and update the exogenous
self.time_index += 1
self.time = self.df.index[self.time_index]
self._set_exogenous()
# Call get_obs before returning so state dict is updated.
obs, state = self.get_obs(**obs_kwargs)
return np.array(obs), rew, self.is_terminal(), state
def get_obs(
self,
**obs_kwargs
) -> Tuple[np.ndarray, dict]:
"""Returns the current state, clipping the values as specified by the
gym observation space box constraints. Calling this method also updates
the state dict attribute for convenience."""
# Call the ROM model to get the new zone temps
# Compute the temperature violation per zone
temp_lb = self.comfort_bounds_df["temp_lb"][self.time].copy()
temp_ub = self.comfort_bounds_df["temp_ub"][self.time].copy()
zone_upper_temp_viol = np.zeros(self.num_zones, dtype=np.float64)
zone_lower_temp_viol = np.zeros(self.num_zones, dtype=np.float64)
for i, temp in enumerate(self.zone_temp):
# zone_temp_viol[i] = max(max(0, temp_lb - temp), max(0, temp - temp_ub))
# Positive violation is true violation while negative violation means margin.
zone_upper_temp_viol[i] = temp - temp_ub
zone_lower_temp_viol[i] = temp_lb - temp
# Add nominal values for bus_voltage and p_setpoint if not provided in kwargs
bus_voltage = obs_kwargs.get("bus_voltage")
p_setpoint = obs_kwargs.get("p_setpoint")
# Create a dict to record all possible state values. We can then filter
# them out using the obs_config when creating the obs array.
# TODO: Automate making sure state keys have same order as DEFAULT_OBS_CONFIG.
self.state = OrderedDict({"zone_temp_{}".format(k): v for k, v in enumerate(self.zone_temp)})
self.state.update({"zone_upper_viol_{}".format(k): v for k, v in enumerate(zone_upper_temp_viol)})
self.state.update({"zone_lower_viol_{}".format(k): v for k, v in enumerate(zone_lower_temp_viol)})
self.state.update({
"comfort_lower": temp_lb, # current comfort lower bound
"comfort_upper": temp_ub, # current comfort upper bound
"outdoor_temp": self.temp_oa, # current outdoor temp
"p_consumed": self.p_consumed, # current p consumed
"time_of_day": 1. * self.time_index / self.max_episode_steps, # time,
"bus_voltage": bus_voltage if bus_voltage is not None else 1.0,
"min_voltage": bus_voltage if bus_voltage is not None else 1.0,
"max_voltage": bus_voltage if bus_voltage is not None else 1.0,
"p_setpoint": p_setpoint if p_setpoint is not None else np.inf
})
self.state.update(obs_kwargs)
# Create the filtered observation array and clip values to low/high
obs = np.array(
[v for k, v in self.state.items() if k in self.obs_labels],
dtype=object # otherwise a warning is raised about ragged seq
).astype(np.float64)
obs = np.clip(obs, self._observation_space.low, self._observation_space.high).squeeze()
if self.rescale_spaces:
obs = to_scaled(obs, self._observation_space.low, self._observation_space.high)
return obs.copy(), self.state.copy()
def step_reward(self) -> Tuple[float, dict]:
"""Default reward is soft constraint on comfort bounds."""
        viol_lower = [v for k,v in self.state.items() if k.startswith("zone_lower_viol_")]
viol_upper = [v for k,v in self.state.items() if k.startswith("zone_upper_viol_")]
rew = np.array(viol_lower)**2 + np.array(viol_upper)**2
return rew, {}
def is_terminal(self) -> bool:
"""Returns whether the current state is terminal. Currently this is only
true when the maximum number of episode steps is reached."""
return self.time_index == self.max_episode_steps - 1
@property
def real_power(self) -> float:
"""Return the real power consumed in the most recent step."""
return self.state["p_consumed"]
class FiveZoneROMThermalEnergyEnv(FiveZoneROMEnv):
"""Subclass with identical physics, but that balances energy and comfort costs."""
def step_reward(self) -> Tuple[float, dict]:
"""Overwriting reward to balance energy and comfort."""
alpha = 0.2
energy_consumption_reward = -self.state["p_consumed"] / 12.0
comfort_error = [
max(self.state["zone_upper_viol_{}".format(i)], self.state["zone_lower_viol_{}".format(i)], 0.0)
for i in range(self.num_zones)
]
comfort_reward = -(sum([x**2 for x in comfort_error]))
reward = alpha * energy_consumption_reward * 0.5 + (1. - alpha) * comfort_reward
meta = {
"comfort_rew": comfort_reward,
"energy_rew": energy_consumption_reward
}
return reward, meta
if __name__ == '__main__':
env = FiveZoneROMThermalEnergyEnv()
obs = env.reset()
print(obs)
| 2,732
| 7,031
| 23
|
eb7c87060f871a6453a9831294ee566533778c5a
| 1,512
|
py
|
Python
|
external_apps/timezones/tests.py
|
davemerwin/blue-channel
|
67a1a7fcc512574e6522aa57633f9d69c25c8906
|
[
"BSD-3-Clause"
] | 4
|
2016-05-08T06:07:50.000Z
|
2021-11-21T19:41:40.000Z
|
apps/external_apps/timezones/tests.py
|
indro/t2c
|
56482ad4aed150f29353e054db2c97b567243bf8
|
[
"MIT"
] | null | null | null |
apps/external_apps/timezones/tests.py
|
indro/t2c
|
56482ad4aed150f29353e054db2c97b567243bf8
|
[
"MIT"
] | 3
|
2017-07-09T02:14:54.000Z
|
2021-07-13T19:16:59.000Z
|
__test__ = {"API_TESTS": r"""
>>> from django.conf import settings
>>> ORIGINAL_TIME_ZONE = settings.TIME_ZONE
>>> settings.TIME_ZONE = "UTC"
>>> from timezones import forms
# the default case where no timezone is given explicitly.
# uses settings.TIME_ZONE.
>>> f = forms.LocalizedDateTimeField()
>>> f.clean("2008-05-30 14:30:00")
datetime.datetime(2008, 5, 30, 14, 30, tzinfo=<UTC>)
# specify a timezone explicitly. this may come from a UserProfile for example.
>>> f = forms.LocalizedDateTimeField(timezone="America/Denver")
>>> f.clean("2008-05-30 14:30:00")
datetime.datetime(2008, 5, 30, 20, 30, tzinfo=<UTC>)
>>> f = forms.TimeZoneField()
>>> f.clean('US/Eastern')
<DstTzInfo 'US/Eastern' EST-1 day, 19:00:00 STD>
>>> settings.TIME_ZONE = ORIGINAL_TIME_ZONE
""",
"DECORATOR_TESTS": r"""
>>> from timezones import decorators
>>> from datetime import *
>>> class Foo(object):
... datetime = datetime(2008, 6, 20, 23, 58, 17)
... @decorators.localdatetime('datetime')
... def localdatetime(self):
... return 'Australia/Lindeman'
...
>>> foo = Foo()
>>> foo.datetime
datetime.datetime(2008, 6, 20, 23, 58, 17)
>>> foo.localdatetime
datetime.datetime(2008, 6, 21, 9, 58, 17, tzinfo=<DstTzInfo 'Australia/Lindeman' EST+10:00:00 STD>)
>>> foo.localdatetime = datetime(2008, 6, 12, 23, 50, 0)
>>> foo.datetime
datetime.datetime(2008, 6, 12, 13, 50, tzinfo=<UTC>)
>>> foo.localdatetime
datetime.datetime(2008, 6, 12, 23, 50, tzinfo=<DstTzInfo 'Australia/Lindeman' EST+10:00:00 STD>)
"""}
| 32.869565
| 99
| 0.681217
|
__test__ = {"API_TESTS": r"""
>>> from django.conf import settings
>>> ORIGINAL_TIME_ZONE = settings.TIME_ZONE
>>> settings.TIME_ZONE = "UTC"
>>> from timezones import forms
# the default case where no timezone is given explicitly.
# uses settings.TIME_ZONE.
>>> f = forms.LocalizedDateTimeField()
>>> f.clean("2008-05-30 14:30:00")
datetime.datetime(2008, 5, 30, 14, 30, tzinfo=<UTC>)
# specify a timezone explicitly. this may come from a UserProfile for example.
>>> f = forms.LocalizedDateTimeField(timezone="America/Denver")
>>> f.clean("2008-05-30 14:30:00")
datetime.datetime(2008, 5, 30, 20, 30, tzinfo=<UTC>)
>>> f = forms.TimeZoneField()
>>> f.clean('US/Eastern')
<DstTzInfo 'US/Eastern' EST-1 day, 19:00:00 STD>
>>> settings.TIME_ZONE = ORIGINAL_TIME_ZONE
""",
"DECORATOR_TESTS": r"""
>>> from timezones import decorators
>>> from datetime import *
>>> class Foo(object):
... datetime = datetime(2008, 6, 20, 23, 58, 17)
... @decorators.localdatetime('datetime')
... def localdatetime(self):
... return 'Australia/Lindeman'
...
>>> foo = Foo()
>>> foo.datetime
datetime.datetime(2008, 6, 20, 23, 58, 17)
>>> foo.localdatetime
datetime.datetime(2008, 6, 21, 9, 58, 17, tzinfo=<DstTzInfo 'Australia/Lindeman' EST+10:00:00 STD>)
>>> foo.localdatetime = datetime(2008, 6, 12, 23, 50, 0)
>>> foo.datetime
datetime.datetime(2008, 6, 12, 13, 50, tzinfo=<UTC>)
>>> foo.localdatetime
datetime.datetime(2008, 6, 12, 23, 50, tzinfo=<DstTzInfo 'Australia/Lindeman' EST+10:00:00 STD>)
"""}
| 0
| 0
| 0
|
7daec3f052dbf49bcff51f38ad65316565f865b2
| 2,773
|
py
|
Python
|
app.py
|
ghanmi-hamza/Crunshbase_Scraper
|
6255693690d33d7dabce3695affa8ff4668eeef2
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
ghanmi-hamza/Crunshbase_Scraper
|
6255693690d33d7dabce3695affa8ff4668eeef2
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
ghanmi-hamza/Crunshbase_Scraper
|
6255693690d33d7dabce3695affa8ff4668eeef2
|
[
"Apache-2.0"
] | null | null | null |
from functions import Profile
from mongodb import MongoDb
from flask import Flask, render_template, request, redirect
app= Flask(__name__)
@app.route('/')
@app.route('/',methods=['POST'])
if __name__=='__main__':
app.run(debug=True)
| 31.511364
| 91
| 0.569419
|
from functions import Profile
from mongodb import MongoDb
from flask import Flask, render_template, request, redirect
app= Flask(__name__)
@app.route('/')
def start():
return(render_template('view.html'))
@app.route('/',methods=['POST'])
def index():
if request.method == 'POST':
startup_list = request.form['username']
startup_list = list(set(startup_list.split(',')))
startup_list_1 = []
database_name = 'hamza'
m=MongoDb(database_name)
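        # Skip startups already scraped successfully; re-queue previously failed
        # ones and register new ones, marking both as 'running'.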
for startup in startup_list:
if m.verify(startup):
#startup_data = m.show_data(startup)
if m.verify_status(startup, status='succeeded') :
pass
elif m.verify_status(startup, status='failed') :
startup_list_1.append(startup)
m.update_status(startup, {'status' : 'running'})
else:
m.insert_data(startup, {'status' : 'running'})
startup_list_1.append(startup)
MAX_THREADS = 5
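        # Scrape in batches of MAX_THREADS concurrent Profile threads; any remainder,
        # or a list shorter than MAX_THREADS, is scraped sequentially below.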
if len(startup_list_1) >= 5:
for i in range(0,(len(startup_list_1)//MAX_THREADS)*MAX_THREADS,MAX_THREADS):
thread_1 = Profile(startup_list_1[i])
thread_2 = Profile(startup_list_1[i+1])
thread_3 = Profile(startup_list_1[i+2])
thread_4 = Profile(startup_list_1[i+3])
thread_5 = Profile(startup_list_1[i+4])
thread_1.start()
thread_2.start()
thread_3.start()
thread_4.start()
thread_5.start()
thread_1.join()
thread_2.join()
thread_3.join()
thread_4.join()
thread_5.join()
m.replace_data(collection = startup_list_1[i], data = thread_1.data)
m.replace_data(collection = startup_list_1[i+1], data = thread_2.data)
m.replace_data(collection = startup_list_1[i+2], data = thread_3.data)
m.replace_data(collection = startup_list_1[i+3], data = thread_4.data)
m.replace_data(collection = startup_list_1[i+4], data = thread_5.data)
if len(startup_list_1) - (len(startup_list_1)//MAX_THREADS)*MAX_THREADS > 0 :
for startup in startup_list_1[(len(startup_list_1)//MAX_THREADS)*MAX_THREADS:]:
p=Profile(startup)
p.run()
m.replace_data(collection = startup, data = p.data)
else:
for startup in startup_list_1:
p=Profile(startup)
p.run()
m.replace_data(collection = startup, data = p.data)
return(render_template('view1.html'))
if __name__=='__main__':
app.run(debug=True)
| 2,485
| 0
| 44
|
b5f856a2986cde9d382ce5d575f5d958c091b8ab
| 948
|
py
|
Python
|
src/gui/main_app.py
|
AndruePeters/fire_stream
|
8e1a89d4fe02500fbe202d6a0212ac7eaab32e00
|
[
"Apache-2.0"
] | null | null | null |
src/gui/main_app.py
|
AndruePeters/fire_stream
|
8e1a89d4fe02500fbe202d6a0212ac7eaab32e00
|
[
"Apache-2.0"
] | null | null | null |
src/gui/main_app.py
|
AndruePeters/fire_stream
|
8e1a89d4fe02500fbe202d6a0212ac7eaab32e00
|
[
"Apache-2.0"
] | null | null | null |
from tkinter import ttk
import tkinter as tk
import tkinter.messagebox
from gui.menu_bar import MenuBar
from gui.stream_frame import StreamFrame
from config_file.fire_stream import ConfigFile
### Represents the main application, which is the combination of multiple frames
| 39.5
| 99
| 0.732068
|
from tkinter import ttk
import tkinter as tk
import tkinter.messagebox
from gui.menu_bar import MenuBar
from gui.stream_frame import StreamFrame
from config_file.fire_stream import ConfigFile
### Represents the main application, which is the combination of multiple frames
class MainApplication(ttk.Frame):
def __init__(self, parent, *args, **kwargs):
ttk.Frame.__init__(self, parent, *args, **kwargs)
self.parent = parent
self.parent.title("nginx RTMP Configuration Tool")
self.menuBar = MenuBar(self)
self.streamFrame = StreamFrame(self)
self.configFile = ConfigFile()
# preload the text boxes with the values saved to the configuration file
self.streamFrame.ytEntry.insert(tk.END, self.configFile.getProperty('youtube-stream-key'))
self.streamFrame.fbEntry.insert(tk.END, self.configFile.getProperty('facebook-stream-key'))
parent.config(menu=self.menuBar)
| 613
| 12
| 48
|
e5257525dd50663af6d42d8fcc65da7cae7adc9b
| 9,064
|
py
|
Python
|
clu/compilation/compiledb.py
|
fish2000/CLU
|
80bc2df5f001b5639d79ba979e19ec77a9931425
|
[
"BSD-3-Clause"
] | 1
|
2019-07-02T08:17:59.000Z
|
2019-07-02T08:17:59.000Z
|
clu/compilation/compiledb.py
|
fish2000/CLU
|
80bc2df5f001b5639d79ba979e19ec77a9931425
|
[
"BSD-3-Clause"
] | 13
|
2019-12-17T02:28:30.000Z
|
2021-11-17T03:46:10.000Z
|
clu/compilation/compiledb.py
|
fish2000/CLU
|
80bc2df5f001b5639d79ba979e19ec77a9931425
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import print_function
import abc
# import clu.abstract
import collections.abc
import contextlib
import json
import sys, os
abstract = abc.abstractmethod
from clu.constants import consts
from clu.constants.exceptions import CDBError
from clu.fs.abc import BaseFSName
from clu.fs.filesystem import TemporaryName, Directory, rm_rf
from clu.fs.misc import u8str
# from clu.predicates import tuplize
from clu.repr import strfields
from clu.exporting import Exporter
exporter = Exporter(path=__file__)
export = exporter.decorator()
@export
@export
@export
export(CDBError)
# Assign the modules’ `__all__` and `__dir__` using the exporter:
__all__, __dir__ = exporter.all_and_dir()
if __name__ == '__main__':
sys.exit(test())
| 29.718033
| 83
| 0.553288
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import abc
# import clu.abstract
import collections.abc
import contextlib
import json
import sys, os
abstract = abc.abstractmethod
from clu.constants import consts
from clu.constants.exceptions import CDBError
from clu.fs.abc import BaseFSName
from clu.fs.filesystem import TemporaryName, Directory, rm_rf
from clu.fs.misc import u8str
# from clu.predicates import tuplize
from clu.repr import strfields
from clu.exporting import Exporter
exporter = Exporter(path=__file__)
export = exporter.decorator()
@export
class CDBSubBase(BaseFSName, collections.abc.Sequence):
@abstract
def push(self, filepath, command, directory=None,
destination=None):
...
@abstract
def __iter__(self):
...
@abstract
def __len__(self):
...
@abstract
def __getitem__(self, key):
...
@abstract
def inner_repr(self):
...
@abstract
def to_json(self):
""" Dump the contents of the CDB as a JSON UTF-8 string. """
...
@abstract
def __str__(self):
...
@abstract
def __bytes__(self):
...
@export
class CDBBase(CDBSubBase):
def __init__(self):
""" Initialize the CDB base type.
The CDBBase ancestor type takes no constructor arguments.
"""
self.clear()
def __iter__(self):
yield from self.entries.values()
def __len__(self):
return len(self.entries)
def __getitem__(self, key):
try:
return self.entries[int(key)]
except (ValueError, KeyError):
skey = str(key)
if os.extsep in skey:
for entry in self.entries:
if entry['file'] == skey:
return entry
raise KeyError(key)
def push(self, source, command, directory=None,
destination=None):
""" Add an entry to the CDB. """
if not source:
raise CDBError("a file source is required per entry")
entry = {
'directory' : os.fspath(directory or os.getcwd()),
'command' : u8str(command),
'file' : source
}
if destination:
entry.update({
'output' : destination
})
self.entries[source] = entry
def clear(self):
""" Reset the CDB’s “entries” mapping to a fresh empty dict. """
self.entries = {}
return self
def rollout(self):
""" Call to “roll” the CDB values out into a new list. """
return list(self)
def inner_repr(self):
return strfields(self, type(self).fields)
def to_json(self):
return json.dumps(self.rollout())
def __str__(self):
return self.to_json()
def __bytes__(self):
return bytes(self.to_json(), encoding=consts.ENCODING)
def __bool__(self):
# N.B. Can’t use BaseFSName.__bool__(…) as it will
# always be Falsey for any instances that haven’t
# yet been written to disk:
return bool(self.entries)
@export
class CDBJsonFile(CDBBase, contextlib.AbstractContextManager):
fields = ('filename',
'contextdir',
'target') # ‘name’ and ‘exists’ come from BaseFSName
filename = 'compilation_database.json'
splitname = os.path.splitext(filename)
@classmethod
def in_directory(cls, directory):
""" The compilation database filename is a constant –
use this function to check if a file by that name
exists in a given directory (which may be passed
as string data, bytes data, or an instance of the
“os.PathLike” ABC).
Like so:
>>> builddir = pathlib.Path('/var/tmp/build')
>>> if CDBJsonFile.in_directory(builddir):
>>> # …do something!
"""
return cls.filename in Directory(directory)
def __init__(self, directory=None, hidden=False):
""" Initialize a JSON-backed compilation database (CDB).
Pass a path-like instance as “directory” to specify where
on the filesystem the CDB should be created. The default
directory is whatever the current working directory happens
to be when you initialize the CDB instance.
The filename of a JSON compilation database is hardcoded
to “compilation_database.json” – this is part of the LLVM
specification for such things. Optionally, you may specify
a “hidden=True” argument in order to prefix this filename
with a dot, thus hiding it as per the long-standing UNIX-ish
custom. Doing so may be somehow “non-conformant”, though;
you have been warned.
"""
super().__init__()
self.contextdir = Directory(directory)
filename = hidden and f"{consts.QUALIFIER}{self.filename}" or self.filename
self.target = self.contextdir.subpath(filename)
self.read_from = None
self.written_to = None
@property
def name(self):
return self.target
@property
def exists(self):
return os.path.isfile(self.name)
def read(self, path=None):
readpath = path or self.target
if not readpath:
raise CDBError("no path value from which to read")
readpath = os.fspath(readpath)
if not os.path.exists(readpath):
raise CDBError("no file from which to read")
with open(readpath, mode="r") as handle:
try:
cdblist = json.load(handle)
except json.JSONDecodeError as json_error:
raise CDBError("JSON decoder error") from json_error
else:
for cdbentry in cdblist:
key = cdbentry.get('file')
self.entries[key] = dict(cdbentry)
self.read_from = readpath
return self
def write(self, path=None):
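        # Serialize to a temporary file first, then copy it over the target
        # (or an explicit "path" argument), replacing any existing file or link.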
with TemporaryName(prefix=self.splitname[0],
suffix=self.splitname[1][1:]) as tn:
with open(tn.name, mode='w') as handle:
handle.write(self.to_json())
if path is None:
if self.exists:
rm_rf(self.name)
tn.copy(self.name)
self.written_to = self.name
else:
writepath = os.fspath(path)
if os.path.isdir(writepath):
raise CDBError("can't overwrite a directory")
if os.path.isfile(writepath) or \
os.path.islink(writepath):
rm_rf(writepath)
tn.copy(writepath)
self.written_to = writepath
return self
def __enter__(self):
if self.exists:
self.read()
return self
def __exit__(self, exc_type=None,
exc_val=None,
exc_tb=None):
self.write()
export(CDBError)
# Assign the modules’ `__all__` and `__dir__` using the exporter:
__all__, __dir__ = exporter.all_and_dir()
def test():
from clu.testing.utils import inline
# from pprint import pprint
@inline
def test_one():
from clu.fs.filesystem import TemporaryName, td
tmp = td()
cdb = CDBJsonFile(directory=tmp)
assert os.path.samefile(tmp, cdb.contextdir)
assert not cdb # no entries yet
assert not os.path.isfile(cdb.name)
assert not cdb.exists
print("CDB file path:", cdb.name)
cdb.push('yo_dogg.cc', 'clang++ -o yo_dogg.o -pipe -Wall -pedantic')
assert cdb
print("JSON:", cdb.to_json())
prefix, suffix = cdb.splitname
# with TemporaryName(prefix=prefix,
# suffix=suffix[1:],
# randomized=True) as tfn:
#
# cdb.write(path=tfn.name)
#
# assert cdb
# assert os.path.isfile(cdb.name)
# assert cdb.exists
#
# assert not cdb
# assert not os.path.isfile(cdb.name)
# assert not cdb.exists
cdb.write(f"/tmp/{CDBJsonFile.filename}")
# assert cdb
# assert os.path.isfile(cdb.name)
# assert cdb.exists
print("cdb.written_to:", cdb.written_to)
#@inline
def test_two():
pass # INSERT TESTING CODE HERE, pt. II
#@inline.diagnostic
def show_me_some_values():
pass # INSERT DIAGNOSTIC CODE HERE
return inline.test(100)
if __name__ == '__main__':
sys.exit(test())
| 4,198
| 4,055
| 89
|
a32c7f51dddaf8600bf3abf4629e346738287384
| 5,067
|
py
|
Python
|
c-kernel/test/test_cleaners.py
|
Song655/arlo
|
cee1613d4a2b2e1263da9d5b4b9930eef569509c
|
[
"Apache-2.0"
] | 1
|
2019-10-18T13:11:01.000Z
|
2019-10-18T13:11:01.000Z
|
c-kernel/test/test_cleaners.py
|
Song655/arlo
|
cee1613d4a2b2e1263da9d5b4b9930eef569509c
|
[
"Apache-2.0"
] | 1
|
2019-01-28T23:07:32.000Z
|
2019-01-28T23:07:32.000Z
|
c-kernel/test/test_cleaners.py
|
Song655/arlo
|
cee1613d4a2b2e1263da9d5b4b9930eef569509c
|
[
"Apache-2.0"
] | 5
|
2018-03-27T03:30:34.000Z
|
2019-10-18T13:05:37.000Z
|
import os
import sys
sys.path.append('../../arl-python')
import numpy as np
import time
import argparse
from arl.image.cleaners import *
from utils import *
if __name__ == '__main__':
np.random.seed(0)
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='./data')
parser.add_argument('--niter', type=int, default=0)
parser.add_argument('--gain', type=float, default=0.0)
parser.add_argument('--thresh', type=float, default=0.0)
parser.add_argument('--fracthresh', type=float, default=0.0)
parser.add_argument('--nscales', type=int, default=0)
parser.add_argument('--nmoments', type=int, default=0)
parser.add_argument('--nx', type=int, default=0)
parser.add_argument('--ny', type=int, default=0)
args = parser.parse_args()
test_cleaners(args.data_dir, args.niter, args.gain, args.thresh, args.fracthresh, \
args.nscales, args.nmoments, args.nx, args.ny)
| 36.453237
| 110
| 0.678705
|
import os
import sys
sys.path.append('../../arl-python')
import numpy as np
import time
import argparse
from arl.image.cleaners import *
from utils import *
def msmfsclean_simplify(dirty, psf, window, gain, thresh, niter, scales, fracthresh, findpeak='CASA'):
assert 0.0 < gain < 2.0
assert niter > 0
assert len(scales) > 0
m_model = np.zeros(dirty.shape)
nscales = len(scales)
pmax = psf.max()
assert pmax > 0.0
psfpeak = np.argmax(np.fabs(psf))
dmax = dirty.max()
dpeak = np.argmax(dirty)
lpsf = psf / pmax
ldirty = dirty / pmax
nmoments, ny, nx = dirty.shape
assert psf.shape[0] == 2 * nmoments
# Create the "scale basis functions" in Algorithm 1
scaleshape = [nscales, ldirty.shape[1], ldirty.shape[2]]
scalestack = create_scalestack(scaleshape, scales, norm=True)
pscaleshape = [nscales, lpsf.shape[1], lpsf.shape[2]]
pscalestack = create_scalestack(pscaleshape, scales, norm=True)
# Calculate scale convolutions of moment residuals
smresidual = calculate_scale_moment_residual(ldirty, scalestack)
smresidual0 = smresidual.copy()
# Calculate scale scale moment moment psf, Hessian, and inverse of Hessian
# scale scale moment moment psf is needed for update of scale-moment residuals
# Hessian is needed in calculation of optimum for any iteration
# Inverse Hessian is needed to calculate principal solution in moment-space
ssmmpsf = calculate_scale_scale_moment_moment_psf(lpsf, pscalestack)
hsmmpsf, ihsmmpsf = calculate_scale_inverse_moment_moment_hessian(ssmmpsf)
# The window is scale dependent - we form it by smoothing and thresholding
# the input window. This prevents components being placed too close to the
# edge of the Image.
if window is None:
windowstack = None
else:
windowstack = np.zeros_like(scalestack)
windowstack[convolve_scalestack(scalestack, window) > 0.9] = 1.0
absolutethresh = max(thresh, fracthresh * np.fabs(smresidual[0, 0, :, :]).max())
# Start iterations
scale_counts = np.zeros(nscales, dtype='int')
scale_flux = np.zeros(nscales)
# Use original algorithm
start = time.time()
for i in range(niter):
# Find the optimum scale and location.
mscale, mx, my, mval = find_global_optimum(hsmmpsf, ihsmmpsf, smresidual, windowstack, findpeak)
scale_counts[mscale] += 1
scale_flux[mscale] += mval[0]
# Are we ready to stop yet?
peak = np.max(np.fabs(mval))
if peak < absolutethresh:
break
# Calculate indices needed for lhs and rhs of updates to model and residual
lhs, rhs = overlapIndices(ldirty[0, ...], psf[0, ...], mx, my)
m_model = update_moment_model(m_model, pscalestack, lhs, rhs, gain, mscale, mval)
smresidual = update_scale_moment_residual(smresidual, ssmmpsf, lhs, rhs, gain, mscale, mval)
residual = pmax * smresidual[0, :, :, :]
stop = time.time()
print('Original Time: {:.2f}s'.format(stop - start))
return m_model, residual, pscalestack, smresidual0, \
ssmmpsf, hsmmpsf, ihsmmpsf, ldirty, psf
def test_cleaners(data_dir, niter, gain, thresh, fracthresh, nscales, nmoments, nx, ny):
dirty = create_random_data((nmoments, ny, nx), -100, 100, 'float')
psf = create_random_data((nmoments*2, ny, nx), -5, 5, 'float')
m_model, residual, pscalestack, smresidual0, \
ssmmpsf, hsmmpsf, ihsmmpsf, ldirty, psf \
= msmfsclean_simplify(dirty, psf, None, gain=gain, thresh=thresh, niter=niter, scales=[0, 3, 10, 30],\
fracthresh=fracthresh, findpeak='ARL')
store_data(os.path.join(data_dir, 'm_model.dat'), m_model)
store_data(os.path.join(data_dir, 'residual.dat'), residual)
store_data(os.path.join(data_dir, 'pscalestack.dat'), pscalestack)
store_data(os.path.join(data_dir, 'smresidual.dat'), smresidual0)
store_data(os.path.join(data_dir, 'ssmmpsf.dat'), ssmmpsf)
store_data(os.path.join(data_dir, 'hsmmpsf.dat'), hsmmpsf)
store_data(os.path.join(data_dir, 'ihsmmpsf.dat'), ihsmmpsf)
store_data(os.path.join(data_dir, 'ldirty.dat'), ldirty)
store_data(os.path.join(data_dir, 'psf.dat'), psf)
if __name__ == '__main__':
np.random.seed(0)
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='./data')
parser.add_argument('--niter', type=int, default=0)
parser.add_argument('--gain', type=float, default=0.0)
parser.add_argument('--thresh', type=float, default=0.0)
parser.add_argument('--fracthresh', type=float, default=0.0)
parser.add_argument('--nscales', type=int, default=0)
parser.add_argument('--nmoments', type=int, default=0)
parser.add_argument('--nx', type=int, default=0)
parser.add_argument('--ny', type=int, default=0)
args = parser.parse_args()
test_cleaners(args.data_dir, args.niter, args.gain, args.thresh, args.fracthresh, \
args.nscales, args.nmoments, args.nx, args.ny)
| 4,053
| 0
| 46
|
cd3da064f9bd501772402261c8f184e8c014f863
| 143
|
py
|
Python
|
02_sequences/0206_augmented_assignment/020601_list/__main__.py
|
forseti/py-workout-01
|
9ebb36748ec7d4751b2c81086134df320c0f58ed
|
[
"Apache-2.0"
] | null | null | null |
02_sequences/0206_augmented_assignment/020601_list/__main__.py
|
forseti/py-workout-01
|
9ebb36748ec7d4751b2c81086134df320c0f58ed
|
[
"Apache-2.0"
] | null | null | null |
02_sequences/0206_augmented_assignment/020601_list/__main__.py
|
forseti/py-workout-01
|
9ebb36748ec7d4751b2c81086134df320c0f58ed
|
[
"Apache-2.0"
] | null | null | null |
l = [1, 2, 3]
id1 = id(l)
print(f"id1: {id1}")
l *= 2
id2 = id(l)
print(f"id2: {id2}")
assert id1 == id2
print(f"id1 == id2: {id1 == id2}")
| 11.916667
| 34
| 0.503497
|
l = [1, 2, 3]
id1 = id(l)
print(f"id1: {id1}")
l *= 2
id2 = id(l)
print(f"id2: {id2}")
assert id1 == id2
print(f"id1 == id2: {id1 == id2}")
| 0
| 0
| 0
|
81ab10c9e68eaa61ece5a776c79bd7fc8186ea7b
| 4,691
|
py
|
Python
|
tests/word_distance.py
|
nv-d/open-tamil
|
0fcb1cece5ffd6263210db987bede09566353e80
|
[
"MIT"
] | 2
|
2021-07-17T02:52:38.000Z
|
2021-07-17T02:52:52.000Z
|
tests/word_distance.py
|
nv-d/open-tamil
|
0fcb1cece5ffd6263210db987bede09566353e80
|
[
"MIT"
] | null | null | null |
tests/word_distance.py
|
nv-d/open-tamil
|
0fcb1cece5ffd6263210db987bede09566353e80
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# (C) 2015 Muthiah Annamalai
#
# This file is part of 'open-tamil' package tests
#
# setup the paths
from __future__ import print_function
from opentamiltests import *
if __name__ == "__main__":
unittest.main()
| 30.070513
| 88
| 0.484971
|
# -*- coding: utf-8 -*-
# (C) 2015 Muthiah Annamalai
#
# This file is part of 'open-tamil' package tests
#
# setup the paths
from __future__ import print_function
from opentamiltests import *
class WordsSimilarityLevenshtein(unittest.TestCase):
def test_Levenshtein_distance(self):
values = [
[u"setting", u"kitten", 4],
[u"object", u"object", 0],
[u"veil", u"vail", 1],
[u"தேங்காய்", u"மாங்காய்", 1],
]
for items in values:
k = items[0:2]
v = items[2]
self.assertEqual(ngram.Distance.edit_distance(k[0], k[1]), v)
return
def test_Levenshtein_dist_matrix(self):
val = [u"food", u"allergy", u"வார்த்தை", u"இது", u"ஒரு", u"ஒருங்குறி", u"வரிசை"]
L = len(val)
dists = [[0 for i in range(0, L)] for i in range(0, L)]
trueDists = [
[0, 7, 4, 4, 4, 5, 4],
[7, 0, 7, 7, 7, 7, 7],
[4, 7, 0, 4, 4, 5, 4],
[4, 7, 4, 0, 2, 5, 3],
[4, 7, 4, 2, 0, 3, 3],
[5, 7, 5, 5, 3, 0, 5],
[4, 7, 4, 3, 3, 5, 0],
]
for i in range(0, L):
for j in range(0, L):
if i == j:
continue
wA, wB = val[i], val[j]
dists[i][j] = ngram.Distance.edit_distance(wA, wB)
self.assertEqual(dists, trueDists)
return
class WordSimilaritySpeller(unittest.TestCase):
def get_min_distance_alternate(self, pizhai):
from ngram.Distance import edit_distance
agarathi_sorkal = [u"அவிழ்", u"அவல்", u"அவள்", u"தவில்", u"தவள்"]
distances = list(map(lambda w: edit_distance(pizhai, w), agarathi_sorkal))
print(distances)
m = min(distances)
idx = -1
matches = []
while True:
old_idx = idx
try:
idx = distances.index(m, 1 + old_idx, len(distances))
except ValueError:
break
matches.append(agarathi_sorkal[idx])
return matches
def test_simple_speller(self):
pizhai_sorkal = [u"ஏவள்", u"இவல்"]
answers = [[u"அவள்", u"தவள்"], [u"அவல்"]]
output = []
for pizhai in pizhai_sorkal:
alternate = self.get_min_distance_alternate(pizhai)
print(u"%s => %s" % (pizhai, u",".join(alternate)))
output.append(alternate)
self.assertSequenceEqual(output, answers)
class WordsSimilarityDiceJaccard(unittest.TestCase):
def test_Dice_distance(self):
for word in [
u"food",
u"allergy",
u"வார்த்தை",
u"இது",
u"ஒரு",
u"ஒருங்குறி",
u"வரிசை",
]:
self.assertEqual(ngram.Distance.Dice_coeff(word, word), 1.0)
return
def test_Dice_in_the_middle(self):
wordA, wordB = u"நிரலாக்க", u"உதாரணம்" # only common letter = ர.
# n_A = 5, n_B = 5, n_AB = 1; dist = 2*1/(5+5)
dist = ngram.Distance.Dice_coeff(wordA, wordB)
self.assertEqual(dist, 0.2)
def test_Dice_random(self):
for wordA in [
u"food",
u"allergy",
u"வார்த்தை",
u"இது",
u"ஒரு",
u"ஒருங்குறி",
u"வரிசை",
]:
for wordB in u"இது ஒரு எழில் தமிழ் நிரலாக்க மொழி உதாரணம்".split(u" "):
dist = ngram.Distance.Dice_coeff(wordA, wordB)
self.assertTrue(dist <= 1.0 and dist >= 0.0)
return
class WordsSimilarityJaccard(unittest.TestCase):
def test_Dice_distance(self):
for word in [
u"food",
u"allergy",
u"வார்த்தை",
u"இது",
u"ஒரு",
u"ஒருங்குறி",
u"வரிசை",
]:
self.assertEqual(ngram.Distance.Jaccard_coeff(word, word), 0.0)
return
def test_Dice_in_the_middle(self):
wordA, wordB = u"நிரலாக்க", u"உதாரணம்" # only common letter = ர.
# n_A = 5, n_B = 5, n_AB = 1; dist = 2*1/(5+5)
dist = ngram.Distance.Jaccard_coeff(wordA, wordB)
self.assertEqual(dist, 0.8)
def test_Dice_random(self):
for wordA in [
u"food",
u"allergy",
u"வார்த்தை",
u"இது",
u"ஒரு",
u"ஒருங்குறி",
u"வரிசை",
]:
for wordB in u"இது ஒரு எழில் தமிழ் நிரலாக்க மொழி உதாரணம்".split(u" "):
dist = ngram.Distance.Jaccard_coeff(wordA, wordB)
self.assertTrue(dist <= 1.0 and dist >= 0.0)
return
if __name__ == "__main__":
unittest.main()
| 4,576
| 115
| 358
|
4e012ce81d73ddb0a61c59fd864ed7590f969a8d
| 99
|
py
|
Python
|
blog/publications/admin.py
|
Thierryvil/blog-rest-django3
|
1c7a224d531cea86812142d4d488d99431aa05e9
|
[
"MIT"
] | null | null | null |
blog/publications/admin.py
|
Thierryvil/blog-rest-django3
|
1c7a224d531cea86812142d4d488d99431aa05e9
|
[
"MIT"
] | null | null | null |
blog/publications/admin.py
|
Thierryvil/blog-rest-django3
|
1c7a224d531cea86812142d4d488d99431aa05e9
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Publication
admin.site.register(Publication)
| 19.8
| 32
| 0.838384
|
from django.contrib import admin
from .models import Publication
admin.site.register(Publication)
| 0
| 0
| 0
|
9647183d77c8685b21250b167a7a0bb8f6a2de37
| 265
|
py
|
Python
|
mopidy_async_client/__init__.py
|
SvineruS/mopidy-json-client
|
b1fe6eea02f9dbf9fe0c9d1d087421184fd2e0c9
|
[
"Apache-2.0"
] | null | null | null |
mopidy_async_client/__init__.py
|
SvineruS/mopidy-json-client
|
b1fe6eea02f9dbf9fe0c9d1d087421184fd2e0c9
|
[
"Apache-2.0"
] | null | null | null |
mopidy_async_client/__init__.py
|
SvineruS/mopidy-json-client
|
b1fe6eea02f9dbf9fe0c9d1d087421184fd2e0c9
|
[
"Apache-2.0"
] | null | null | null |
"""Async Mopidy Client via JSON/RPC Websocket interface"""
# Fork of https://github.com/ismailof/mopidy-json-client by ismailof
__author__ = 'svinerus (svinerus@gmail.com)'
__version__ = '0.6.4'
from .client import MopidyClient
__all__ = [
'MopidyClient',
]
| 22.083333
| 68
| 0.732075
|
"""Async Mopidy Client via JSON/RPC Websocket interface"""
# Fork of https://github.com/ismailof/mopidy-json-client by ismailof
__author__ = 'svinerus (svinerus@gmail.com)'
__version__ = '0.6.4'
from .client import MopidyClient
__all__ = [
'MopidyClient',
]
| 0
| 0
| 0
|
b72b981336dd182efb7950a9f68fb5c4b03bbd61
| 1,161
|
py
|
Python
|
pretix_billetaarhusgdpr/forms.py
|
aakb/pretix-billet-aarhus-gdpr
|
fcdc7b0e36facd4726ae4d641b91d9711bc206b9
|
[
"Apache-2.0"
] | null | null | null |
pretix_billetaarhusgdpr/forms.py
|
aakb/pretix-billet-aarhus-gdpr
|
fcdc7b0e36facd4726ae4d641b91d9711bc206b9
|
[
"Apache-2.0"
] | null | null | null |
pretix_billetaarhusgdpr/forms.py
|
aakb/pretix-billet-aarhus-gdpr
|
fcdc7b0e36facd4726ae4d641b91d9711bc206b9
|
[
"Apache-2.0"
] | null | null | null |
from django.utils.translation import ugettext_lazy as _
from i18nfield.forms import I18nFormField, I18nTextarea
from pretix.base.forms import SettingsForm
| 43
| 119
| 0.705426
|
from django.utils.translation import ugettext_lazy as _
from i18nfield.forms import I18nFormField, I18nTextarea
from pretix.base.forms import SettingsForm
class GDPRSettingsForm(SettingsForm):
billetaarhusgdpr_message = I18nFormField(
label=_("GDPR message"),
help_text=_("The GDPR message will be shown before the user registers information in the system."),
required=True,
widget=I18nTextarea
)
billetaarhusgdpr_consent_text = I18nFormField(
label=_('GDPR consent text'),
help_text=_('The GDPR consent text must be accepted by the user before a purchase is possible.'),
required=True,
widget=I18nTextarea
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['billetaarhusgdpr_message'].widget.attrs['rows'] = '3'
# self.fields['billetaarhusgdpr_message'].widget.attrs['placeholder'] = _('GDPR message placeholder')
self.fields['billetaarhusgdpr_consent_text'].widget.attrs['rows'] = '3'
# self.fields['billetaarhusgdpr_consent_text'].widget.attrs['placeholder'] = _('GDPR consent text placeholder')
| 442
| 540
| 23
|
4fa0c6f8f2e70c57688b182c75fe8f7d7e0e1e4e
| 149
|
py
|
Python
|
dhost/dapps/utils.py
|
dhost-project/dhost
|
ca6a4a76a737174b24165e20edeb1d1019a9424b
|
[
"MIT"
] | null | null | null |
dhost/dapps/utils.py
|
dhost-project/dhost
|
ca6a4a76a737174b24165e20edeb1d1019a9424b
|
[
"MIT"
] | 67
|
2021-07-06T11:50:25.000Z
|
2021-10-14T13:45:51.000Z
|
dhost/dapps/utils.py
|
dhost-project/dhost
|
ca6a4a76a737174b24165e20edeb1d1019a9424b
|
[
"MIT"
] | null | null | null |
def get_dapp_type(dapp):
"""Return the available dapp implementation."""
if hasattr(dapp, "ipfsdapp"):
return "ipfs"
return None
| 24.833333
| 51
| 0.651007
|
def get_dapp_type(dapp):
"""Return the available dapp implementation."""
if hasattr(dapp, "ipfsdapp"):
return "ipfs"
return None
| 0
| 0
| 0
|
7074a2be32b9d2c703f8ca0dfd6cef5ce9b6dabc
| 15,067
|
py
|
Python
|
kraken_report.py
|
mpieva/nuclear_sediment_pipeline
|
3570ad8c217d8f54f8b1c888d1f4ce5d29388742
|
[
"MIT"
] | 1
|
2021-04-16T17:18:14.000Z
|
2021-04-16T17:18:14.000Z
|
kraken_report.py
|
mpieva/nuclear_sediment_pipeline
|
3570ad8c217d8f54f8b1c888d1f4ce5d29388742
|
[
"MIT"
] | null | null | null |
kraken_report.py
|
mpieva/nuclear_sediment_pipeline
|
3570ad8c217d8f54f8b1c888d1f4ce5d29388742
|
[
"MIT"
] | null | null | null |
"""Parse a kraken output file and generate a report and possibly extract reads for selected clades. (Adapted from original kraken-report.pl)
"""
import sys
import gzip
from csv import reader
from Bio import SeqIO
from pysam import AlignmentFile
from collections import defaultdict
import argparse
from pathlib import Path
import os
import json
import contextlib
#grep 'scientific name' names.dmp |cut -d'|' -f 1,2 |gzip -c >names_trimmed.dmp
#cut -d '|' -f 1,2,3 nodes.dmp|gzip -c >nodes_trimmed.dmp
# remember to use filtered nodes.dmp and names.dmp
def load_taxonomy(db_prefix):
"""Create/Read a taxonomy maps into dicts
"""
global name_map
name_map = {}
global rank_map
rank_map = {}
global child_lists
child_lists = defaultdict(list)
global name_clade_map
parent_map = {}
#read the taxonomy .dmp to and create or dict
if not os.path.exists(db_prefix+"/taxonomy/name_map.json") or \
not os.path.exists(db_prefix+"/taxonomy/rank_map.json") or \
not os.path.exists(db_prefix+"/taxonomy/child_lists.json") or \
not os.path.exists(db_prefix+"/taxonomy/parent_map.json"):
print ("Map files don't exist, creating json...", file=sys.stderr)
with gzip.open(db_prefix+"/taxonomy/names_trimmed.dmp.gz", 'rt') as name_file:
for line in name_file:
node_id, name = line.strip().split('|')
node_id = node_id.strip()
name = name.strip()
name_map[node_id] = name
with gzip.open(db_prefix+"/taxonomy/nodes_trimmed.dmp.gz", 'rt') as nodes_file:
for line in nodes_file:
node_id, parent_id, rank = line.strip().split('|')
node_id = node_id.strip()
parent_id = parent_id.strip()
rank = rank.strip()
if node_id == '1':
parent_id = '0'
child_lists[parent_id].append(node_id)
rank_map[node_id] = rank
parent_map[node_id] = parent_id
#save our dicts as json
with open(db_prefix+"/taxonomy/name_map.json",'w') as name_map_file, \
open(db_prefix+"/taxonomy/rank_map.json",'w') as rank_map_file, \
open(db_prefix+"/taxonomy/child_lists.json",'w') as child_lists_file, \
open(db_prefix+"/taxonomy/parent_map.json",'w') as parent_map_file:
json.dump(name_map,name_map_file)
json.dump(rank_map, rank_map_file)
json.dump(child_lists,child_lists_file)
json.dump(parent_map, parent_map_file)
else: #load the json
with open(db_prefix+"/taxonomy/name_map.json",'r') as name_map_file, \
open(db_prefix+"/taxonomy/rank_map.json",'r') as rank_map_file, \
open(db_prefix+"/taxonomy/child_lists.json",'r') as child_lists_file:
name_map = json.load(name_map_file)
rank_map = json.load(rank_map_file)
child_lists = json.load(child_lists_file)
name_clade_map = {v: k for k, v in name_map.items()}
#return (name_map, rank_map, child_lists, name_clade_map)
def rank_code(rank):
"""Translate ranks into single letters code
"""
if rank == "species": return "S"
if rank == "genus": return "G"
if rank == "family": return "F"
if rank == "order": return "O"
if rank == "class": return "C"
if rank == "phylum": return "P"
if rank == "kingdom": return "K"
if rank == "superkingdom": return "D"
return "-"
def get_taxonomy_str(taxid):
"""Generate the full taxonomy from a specific clade
Parameters
----------
taxid: str
Returns
-------
str
"""
taxid_string = known_taxonomy_strings.get(taxid, False)
if not taxid_string:
nodes = []
while (taxid != '0'):
nodes += [name_map[taxid]]
taxid = parent_map[taxid]
taxid_string = ';'.join(nodes[::-1])
known_taxonomy_strings[taxid] = taxid_string
return taxid_string
def extract_fasta_from_id(fileout, id_list, seqfile, min_length):
"""Extract reads assigned to specific taxa.
Parameters
----------
fileout: str
Filename to write into
id_list: list of
"""
if seqfile.endswith('a') or seqfile.endswith('a.gz'):
file_type = "fasta"
file_suffix = '.fa'
elif seqfile.endswith('q') or seqfile.endswith('q.gz'):
file_type = "fastq"
file_suffix = '.fq'
with open(fileout+file_suffix, 'w') as fout, \
gzip.open(seqfile, "rt") if seqfile.endswith("gz") else _ret_file(seqfile) as seqfile:
# working with a generator expression, may be better memory-wise
input_seq_iterator = SeqIO.parse(seqfile, file_type)
fasta_seq_iterator = (rec for rec in input_seq_iterator if rec.id in id_list and len(rec) >= min_length)
count = SeqIO.write(fasta_seq_iterator, fout, file_type)
if len(id_list) != count: # sanity check you may want to extract from a demultiplexed file
print("Warning, EOF reached but", len(id_list) - count, "sequences remained, is extractFile the original source?", file=sys.stderr)
#this function will discard child clades in order to have a proper summation
if __name__ == "__main__":
name_map = rank_map = child_lists = node_name_map = clade_counts = taxo_counts = seq_count = extract_ids = seq_ids = None
_main()
| 44.842262
| 172
| 0.629521
|
"""Parse a kraken output file and generate a report and possibly extract reads for selected clades. (Adapted from original kraken-report.pl)
"""
import sys
import gzip
from csv import reader
from Bio import SeqIO
from pysam import AlignmentFile
from collections import defaultdict
import argparse
from pathlib import Path
import os
import json
import contextlib
#grep 'scientific name' names.dmp |cut -d'|' -f 1,2 |gzip -c >names_trimmed.dmp
#cut -d '|' -f 1,2,3 nodes.dmp|gzip -c >nodes_trimmed.dmp
# remember to use filtered nodes.dmp and names.dmp
def load_taxonomy(db_prefix):
"""Create/Read a taxonomy maps into dicts
"""
global name_map
name_map = {}
global rank_map
rank_map = {}
global child_lists
child_lists = defaultdict(list)
global name_clade_map
parent_map = {}
#read the taxonomy .dmp to and create or dict
if not os.path.exists(db_prefix+"/taxonomy/name_map.json") or \
not os.path.exists(db_prefix+"/taxonomy/rank_map.json") or \
not os.path.exists(db_prefix+"/taxonomy/child_lists.json") or \
not os.path.exists(db_prefix+"/taxonomy/parent_map.json"):
print ("Map files don't exist, creating json...", file=sys.stderr)
with gzip.open(db_prefix+"/taxonomy/names_trimmed.dmp.gz", 'rt') as name_file:
for line in name_file:
node_id, name = line.strip().split('|')
node_id = node_id.strip()
name = name.strip()
name_map[node_id] = name
with gzip.open(db_prefix+"/taxonomy/nodes_trimmed.dmp.gz", 'rt') as nodes_file:
for line in nodes_file:
node_id, parent_id, rank = line.strip().split('|')
node_id = node_id.strip()
parent_id = parent_id.strip()
rank = rank.strip()
if node_id == '1':
parent_id = '0'
child_lists[parent_id].append(node_id)
rank_map[node_id] = rank
parent_map[node_id] = parent_id
#save our dicts as json
with open(db_prefix+"/taxonomy/name_map.json",'w') as name_map_file, \
open(db_prefix+"/taxonomy/rank_map.json",'w') as rank_map_file, \
open(db_prefix+"/taxonomy/child_lists.json",'w') as child_lists_file, \
open(db_prefix+"/taxonomy/parent_map.json",'w') as parent_map_file:
json.dump(name_map,name_map_file)
json.dump(rank_map, rank_map_file)
json.dump(child_lists,child_lists_file)
json.dump(parent_map, parent_map_file)
else: #load the json
with open(db_prefix+"/taxonomy/name_map.json",'r') as name_map_file, \
open(db_prefix+"/taxonomy/rank_map.json",'r') as rank_map_file, \
open(db_prefix+"/taxonomy/child_lists.json",'r') as child_lists_file:
name_map = json.load(name_map_file)
rank_map = json.load(rank_map_file)
child_lists = json.load(child_lists_file)
name_clade_map = {v: k for k, v in name_map.items()}
#return (name_map, rank_map, child_lists, name_clade_map)
def rank_code(rank):
"""Translate ranks into single letters code
"""
if rank == "species": return "S"
if rank == "genus": return "G"
if rank == "family": return "F"
if rank == "order": return "O"
if rank == "class": return "C"
if rank == "phylum": return "P"
if rank == "kingdom": return "K"
if rank == "superkingdom": return "D"
return "-"
def get_taxonomy_str(taxid):
"""Generate the full taxonomy from a specific clade
Parameters
----------
taxid: str
Returns
-------
str
"""
taxid_string = known_taxonomy_strings.get(taxid, False)
if not taxid_string:
nodes = []
while (taxid != '0'):
nodes += [name_map[taxid]]
taxid = parent_map[taxid]
taxid_string = ';'.join(nodes[::-1])
known_taxonomy_strings[taxid] = taxid_string
return taxid_string
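# Added illustrative sketch: the same walk performed by get_taxonomy_str, but
# on explicit toy maps so it reads in isolation. The two dicts below are
# made-up stand-ins for the real name/parent maps, not NCBI data.
def _toy_lineage(taxid, names, parents):
    nodes = []
    while taxid != '0':
        nodes.append(names[taxid])
        taxid = parents[taxid]
    return ';'.join(reversed(nodes))
# _toy_lineage('3', {'1': 'root', '2': 'Bacteria', '3': 'E. coli'},
#              {'1': '0', '2': '1', '3': '2'}) == 'root;Bacteria;E. coli'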
@contextlib.contextmanager
def _ret_file(f):
yield f
def extract_fasta_from_id(fileout, id_list, seqfile, min_length):
"""Extract reads assigned to specific taxa.
Parameters
----------
fileout: str
Filename to write into
id_list: list of
"""
if seqfile.endswith('a') or seqfile.endswith('a.gz'):
file_type = "fasta"
file_suffix = '.fa'
elif seqfile.endswith('q') or seqfile.endswith('q.gz'):
file_type = "fastq"
file_suffix = '.fq'
with open(fileout+file_suffix, 'w') as fout, \
gzip.open(seqfile, "rt") if seqfile.endswith("gz") else _ret_file(seqfile) as seqfile:
# working with a generator expression, may be better memory-wise
input_seq_iterator = SeqIO.parse(seqfile, file_type)
fasta_seq_iterator = (rec for rec in input_seq_iterator if rec.id in id_list and len(rec) >= min_length)
count = SeqIO.write(fasta_seq_iterator, fout, file_type)
if len(id_list) != count: # sanity check you may want to extract from a demultiplexed file
print("Warning, EOF reached but", len(id_list) - count, "sequences remained, is extractFile the original source?", file=sys.stderr)
def extract_bam_from_id(fileout, id_list, seqfile, min_length):
num_seq_to_extract = len(id_list)
with AlignmentFile(seqfile, 'rb', check_sq=False) as bam_in, \
AlignmentFile(fileout+".bam", 'wb', template=bam_in) as fout:
for read in bam_in.fetch(until_eof=True):
if read.query_name in id_list: # as set is more efficient than a list
#see https://wiki.python.org/moin/TimeComplexity
if not read.is_paired:
num_seq_to_extract -= 1
elif read.is_read2: # decrease counter only if we see the second read of our pair
num_seq_to_extract -= 1
if read.query_length >= min_length:
fout.write(read)
if not num_seq_to_extract:
break
def extract_seq_from_id(fileout, id_list, seqfile, data_type='bam', min_length=0):
if seqfile.endswith("fasta") or seqfile.endswith("fa") or seqfile.endswith("fas") or seqfile.endswith("fq") or seqfile.endswith("fastq") or seqfile.endswith("gz"):
data_type = 'fasta'
if data_type == 'fasta': extract_fasta_from_id(fileout, id_list, seqfile, min_length)
elif data_type == 'bam': extract_bam_from_id(fileout, id_list, seqfile, min_length)
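# (Added note) dfs_report below walks the taxonomy depth-first from `node`:
# it prints one report line per clade that passes the rank/min_count/minp
# filters, and when --extractFile is given it accumulates the read ids of the
# clade and hands them to extract_seq_from_id once a target clade or rank is
# reached.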
def dfs_report (node, depth, related=[], infile="", extractFile=None, outdir="", zeros=False, clades=[], target_rank=None, min_count=0, minp=0.0, min_length=0):
global extract_ids # we share this list through the recursive calls
t_counts, c_counts, rank = taxo_counts[node], clade_counts[node], rank_map[node]
if (not c_counts and not zeros):
return
c_counts_percent = round(c_counts * 100 / seq_count, 2)
#filter on min seqences on clade
#filter on min percent
#filter on rank
if (not target_rank or target_rank == rank_code(rank)) and (c_counts >= min_count and c_counts_percent >= minp):
if node not in related: # TODO not in excluded, implement an 'excluded' switch
print ("{:6.2f}\t{}\t{}\t{}\t{}\t{}{}".format(
c_counts_percent,
c_counts,
t_counts,
rank_code(rank),
node,
" " * depth,
name_map[node]))
# start saving the sequence mames for this clade
if target_rank == rank_code(rank): extract_ids = set()
children = child_lists.get(node,[])
if len(children):
sorted_children = sorted(children, key=lambda k: clade_counts[k], reverse=True)
#format output only if not filtered by rank
if not target_rank : depth += 1
for child in sorted_children:
#dfs_report(child, depth)
dfs_report(child, depth, related, infile, extractFile, outdir, zeros, clades, target_rank, min_count, minp, min_length)
# we want to extract up to a certain clade from a certain rank,
# if there is a min sequences to extract, and only if a ref file is provided
if extractFile:
outdir = Path(outdir)
if not outdir.exists():
outdir.mkdir(parents=True)
if t_counts:# add only if the node has sequences assigned to it
# a set is more efficient than a list: see https://wiki.python.org/moin/TimeComplexity
extract_ids = extract_ids.union(seq_ids[node])
if (node in clades or rank_code(rank) == target_rank) and \
(c_counts_percent >= minp and len(extract_ids) >= min_count):
print ("Extracting",len(extract_ids),"sequences for",name_map[node], file=sys.stderr)
if "fa.kraken" in infile or "fq.kraken" in infile:
suffix_length = len("fa.kraken")
elif "fasta.kraken" in infile or "fastq.kraken" in infile:
suffix_length = len("fasta.kraken")
elif "bam.kraken" in infile:
suffix_length = len("bam.kraken")
else:
suffix_length = len("kraken")
# the names contains whitespaces
extract_seq_from_id(str(outdir / Path(infile).name[:-suffix_length])+name_map[node].replace(' ','_'), \
extract_ids, extractFile, min_length)
extract_ids = set()
def dfs_summation(node):
children = child_lists.get(node,[])
if len(children):
for child in children:
dfs_summation(child)
clade_counts[node] += clade_counts.get(child, 0)
#this function will discard child clades in order to have a proper summation
def dfs_related(node, node_list):
res = []
children = child_lists.get(node,[])
if len(children):
#iterate through all children
for child in children:
if child in node_list: res+=[child]
# recursively look for children
res += dfs_related(child, node_list)
return res
def _main():
parser = argparse.ArgumentParser(description='Create a report from a kraken output. Optionally extract reads')
parser.add_argument('--db', required=True,
help='The kraken database to use')
parser.add_argument('--zeros', action='store_true',
help='Show also 0')
parser.add_argument('--clades', default=[],
help='Select only specified clades (comma separated)')
parser.add_argument('--minp', default=0.0, type=float,
help='Filter on the minimum percent of sequences for this clade')
parser.add_argument('--min_count', default=0, type=int,
help='Filter on the minimum sequences for this clade')
parser.add_argument('--rank', help='Only return clades for specified rank')
parser.add_argument('--translate', help='Output for "translate" (read -> lineage)')
parser.add_argument('--extractFile', help='File where to extract sequence from')
parser.add_argument('--min_length', default=0, type=int, help='Minimum length filter')
parser.add_argument('infile', metavar="kraken.output")
parser.add_argument('--outdir', default="", help='Extracted reads directory')
args = parser.parse_args()
db_prefix = os.path.abspath(args.db)
if args.rank and len(args.rank) > 1:
        args.rank = rank_code(args.rank)
global seq_ids
seq_ids = defaultdict(list)
#extract_ids = set()
load_taxonomy(db_prefix)
#name_map, rank_map, child_lists, node_name_map = load_taxonomy(db_prefix)
    global known_taxonomy_strings, parent_map
    known_taxonomy_strings = {}
if args.translate:
with open(db_prefix+"/taxonomy/parent_map.json",'r') as parent_map_file:
parent_map = json.load(parent_map_file)
print("Map files loaded", file=sys.stderr)
if args.clades: # handle providing multiple clades, comma separated
args.clades = args.clades.split(",")
for idx, clade in enumerate(args.clades): # translate taxa names to number
if not clade in name_map:
try:
                    args.clades[idx] = name_clade_map[clade.replace("_"," ")]
except KeyError:
                    print("Specified taxa {} not found, exiting".format(clade), file=sys.stderr)
exit(1)
args.clades = set(args.clades)
global seq_count
seq_count = 0 # init the number of sequences
global taxo_counts
taxo_counts = defaultdict(int) # every new entry will be initialized to 0
with open(args.infile, 'r', newline='') as krakenfile, \
open(args.translate, "w") if args.translate else _ret_file(None) as translate:
kfile = reader(krakenfile, delimiter='\t')
for row in kfile:
taxo_counts[row[2]] += 1
seq_count += 1
seq_ids[row[2]].append(row[1])
if args.translate and row[0].startswith('C'):
print (row[1], get_taxonomy_str(row[2]), sep="\t", file=translate)
print(args.infile,"parsed", file=sys.stderr)
classified_count = seq_count - taxo_counts[0]
global clade_counts
clade_counts = taxo_counts.copy()
if args.clades:
#do the summation only once for each clade,
# that means, if we specify clades related to each other:
# e.g. 9606 9605, only the higher clade will be used
# as the descendant one will be recursively computed
related_clades=set()
for node in args.clades:
related_clades = related_clades.union(dfs_related(node, args.clades))
unrelated_clades = args.clades.difference(related_clades)
for clade in unrelated_clades:
dfs_summation(clade)
else:
dfs_summation('1')
unclassified_percent = 100
if seq_count:
unclassified_percent = clade_counts.get(0) * 100 / seq_count
if not args.clades and not args.rank:
print ("{:6.2f}\t{}\t{}\t{}\t{}\t{}{}".format(
unclassified_percent,
clade_counts.get(0), taxo_counts[0],
"U", 0, "", "unclassified"))
if args.clades:
related_clades=set()
for node in args.clades:
related_clades = related_clades.union(dfs_related(node, args.clades))
for clade in args.clades:
#dfs_report (node, depth, related=[], infile="", extractFile=None, outdir="", zeros=False, clades=[], target_rank=None, min_count=0, minp=0.0, min_length=0)
dfs_report(clade, 0, related_clades, args.infile, args.extractFile, args.outdir, args.zeros, args.clades, args.rank, args.min_count, args.minp, args.min_length)
else:
dfs_report('1', 0, [], args.infile, args.extractFile, args.outdir, args.zeros, args.clades, args.rank, args.min_count, args.minp, args.min_length)
if __name__ == "__main__":
name_map = rank_map = child_lists = node_name_map = clade_counts = taxo_counts = seq_count = extract_ids = seq_ids = None
_main()
| 9,428
| 0
| 163
|
337618b4d7d2e40d90ad9aa116afb1da1ebe6553
| 1,808
|
py
|
Python
|
pythonforandroid/recipes/lxml/__init__.py
|
gruns/python-for-android
|
1bee8a821d57c39492cf633112673bf6ce0be8db
|
[
"MIT"
] | 1
|
2022-01-26T18:42:36.000Z
|
2022-01-26T18:42:36.000Z
|
pythonforandroid/recipes/lxml/__init__.py
|
gruns/python-for-android
|
1bee8a821d57c39492cf633112673bf6ce0be8db
|
[
"MIT"
] | null | null | null |
pythonforandroid/recipes/lxml/__init__.py
|
gruns/python-for-android
|
1bee8a821d57c39492cf633112673bf6ce0be8db
|
[
"MIT"
] | 1
|
2020-05-24T16:28:13.000Z
|
2020-05-24T16:28:13.000Z
|
from pythonforandroid.toolchain import Recipe, shprint, shutil, current_directory
from pythonforandroid.toolchain import CompiledComponentsPythonRecipe
from pythonforandroid.util import current_directory, ensure_dir
from pythonforandroid.logger import debug, shprint, info
from os.path import exists, join, dirname
import sh
import glob
recipe = LXMLRecipe()
| 48.864865
| 162
| 0.714049
|
from pythonforandroid.toolchain import Recipe, shprint, shutil, current_directory
from pythonforandroid.toolchain import CompiledComponentsPythonRecipe
from pythonforandroid.util import current_directory, ensure_dir
from pythonforandroid.logger import debug, shprint, info
from os.path import exists, join, dirname
import sh
import glob
class LXMLRecipe(CompiledComponentsPythonRecipe):
version = '3.6.0'
url = 'https://pypi.python.org/packages/source/l/lxml/lxml-{version}.tar.gz'
depends = ['python2', 'libxml2', 'libxslt']
name = 'lxml'
call_hostpython_via_targetpython = False # Due to setuptools
def should_build(self, arch):
super(LXMLRecipe, self).should_build(arch)
        return True  # always rebuilds; the original check below was unreachable
        # return not exists(join(self.ctx.get_libs_dir(arch.arch), 'etree.so'))
def build_arch(self, arch):
super(LXMLRecipe, self).build_arch(arch)
shutil.copyfile('%s/build/lib.linux-x86_64-2.7/lxml/etree.so' % self.get_build_dir(arch.arch), join(self.ctx.get_libs_dir(arch.arch), 'etree.so'))
shutil.copyfile('%s/build/lib.linux-x86_64-2.7/lxml/objectify.so' % self.get_build_dir(arch.arch), join(self.ctx.get_libs_dir(arch.arch), 'objectify.so'))
def get_recipe_env(self, arch):
env = super(LXMLRecipe, self).get_recipe_env(arch)
bxml = "/home/zgoldberg/.local/share/python-for-android/build/other_builds/libxml2/armeabi/libxml2/"
bxsl = "/home/zgoldberg/.local/share/python-for-android/build/other_builds/libxslt/armeabi/libxslt"
targetpython = "%s/include/python2.7/" % dirname(dirname(self.ctx.hostpython))
env['CC'] += " -I%s/include -I%s -I%s" % (bxml, bxsl, targetpython)
env['LDSHARED'] = '%s -nostartfiles -shared -fPIC -lpython2.7' % env['CC']
return env
recipe = LXMLRecipe()
| 1,081
| 344
| 23
|
6718bf877b76b460a1a15a78d00cdea02e635882
| 12,871
|
py
|
Python
|
gerrit-report2.py
|
rfrandse/test
|
e2e5cb46fecff1be16b3089dd22a8cb6e99f3ac9
|
[
"Unlicense"
] | null | null | null |
gerrit-report2.py
|
rfrandse/test
|
e2e5cb46fecff1be16b3089dd22a8cb6e99f3ac9
|
[
"Unlicense"
] | null | null | null |
gerrit-report2.py
|
rfrandse/test
|
e2e5cb46fecff1be16b3089dd22a8cb6e99f3ac9
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/python
import argparse
import subprocess
import json
import re
import config
import collections
from datetime import datetime, timedelta
import time
from pprint import pprint
from slacker import Slacker
slack = Slacker(config.token)
option_age = ""
option_owner = None
option_protocol = 'slack'
option_ssm = None
option_stat = None
query_cache = {}
HOST="openbmc.gerrit"
username_map = {
'irc': {
'jenkins-openbmc': "Jenkins",
'williamspatrick': "stwcx",
},
'slack': {
'amboar': "@arj",
'anoo1': "@anoo",
'bradbishop': "@bradleyb",
'bjwyman': "@v2cib530",
'cbostic': "@cbostic",
'dhruvibm': "@dhruvaraj",
'dkodihal': "@dkodihal",
'devenrao': "@devenrao",
'geissonator': "@andrewg",
'eddiejames': "@eajames",
'gtmills': "@gmills",
'jenkins-openbmc': "Jenkins",
'jk-ozlabs' : "@jk",
'mine260309': "@shyulei",
'msbarth': "@msbarth",
'mtritz': "@mtritz",
'ngorugan': "@ngorugan",
'navrathi' : "@navrathi",
'ojayanth': "@ojayanth",
'ratagupt': "@ratagupt",
'shenki': "@jms",
'spinler': "@spinler",
'tomjoseph83': "@tomjoseph",
},
}
project_map = {
'openbmc/witherspoon-pfault-analysis': ('spinler','Matt Spinler'),
'openbmc/phosphor-mrw-tools':('spinler','Matt Spinler'),
'openbmc/mboxbridge': ('amboar','Andrew Jeffery'),
'openbmc/obmc-console': ('jk-ozlabs','Jeremy Kerr'),
'openbmc/btbridge': ('jk-ozlabs','Jeremy Kerr'),
'openbmc/inarp': ('jk-ozlabs','Jeremy Kerr'),
'openbmc/phosphor-settingsd' :('dkodihal','Deepak Kodihalli'),
'openbmc/phosphor-logging' :('dkodihal','Deepak Kodihalli'),
'openbmc/openpower-vpd-parser': ('dkodihal','Deepak Kodihalli'),
'openbmc/phosphor-mboxd': ('amboar','Andrew Jeffery'),
'openbmc/openbmc': ('bradbishop','Brad Bishop'),
'openbmc/phosphor-host-ipmid': ('tomjoseph83','Tom Joseph')
}
send_to_slack = ['@andrewg',
'@anoo',
'@arj',
'@bradleyb',
'@cbostic',
'@devenrao',
'@dkodihal',
'@dhruvaraj',
'@eajames',
'@gmills',
'@jms',
'@jk',
'@msbarth',
'@mtritz',
'@navrathi',
'@ngorugan',
'@ojayanth',
'@ratagupt',
'@spinler',
'@tomjoseph',
'@v2cib530']
# print "sending stats to openbmcdev channel"
# slack.chat.post_message('#openbmcdev',message)
parser = argparse.ArgumentParser()
parser.add_argument('--owner', help='Change owner', type=str,
action='append')
parser.add_argument('--protocol', help='Protocol for username conversion',
type=str, choices=(username_map.keys()))
parser.add_argument('-sm', action='store_true',help='send slack message flag')
parser.add_argument('-stat', action='store_true',help='send statistics to slack flag')
subparsers = parser.add_subparsers()
report = subparsers.add_parser('report', help='Generate report')
report.set_defaults(func=do_report)
args = parser.parse_args()
if ('owner' in args) and args.owner:
option_owner = " OR ".join(map(lambda x: "owner:" + x,
args.owner))
if 'protocol' in args and args.protocol:
option_protocol = args.protocol
if args.sm:
option_ssm = 'True'
print("will send messages to slack")
else:
    print("no slack messages will be sent")
if args.stat:
option_stat = 'True'
if 'func' in args:
args.func(args)
else:
parser.print_help()
| 31.316302
| 99
| 0.569963
|
#!/usr/bin/python
import argparse
import subprocess
import json
import re
import config
import collections
from datetime import datetime, timedelta
import time
from pprint import pprint
from slacker import Slacker
slack = Slacker(config.token)
option_age = ""
option_owner = None
option_protocol = 'slack'
option_ssm = None
option_stat = None
query_cache = {}
HOST="openbmc.gerrit"
def query(*args):
COMMAND = """gerrit query \
--format json --all-reviewers \
--dependencies --current-patch-set -- \
'%s'""" % " ".join(args)
s = subprocess.Popen(["ssh", "%s" % HOST, COMMAND],
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
results = list(map(json.loads, s.stdout.read().splitlines()))
# print json.dumps(results,indent=4)
del results[-1]
for r in results:
query_cache[r['id']] = r
return results
def changes():
args = ""
if option_owner:
args += " ( {0} )".format(option_owner)
return query(args,
"status:open", "-is:draft", "-label:Code-Review=-2",
"-project:openbmc/openbmc-test-automation")
def change_by_id(change_id):
if change_id in query_cache:
return query_cache[change_id]
c = query(change_id)
if len(c):
return c[0]
return None
username_map = {
'irc': {
'jenkins-openbmc': "Jenkins",
'williamspatrick': "stwcx",
},
'slack': {
'amboar': "@arj",
'anoo1': "@anoo",
'bradbishop': "@bradleyb",
'bjwyman': "@v2cib530",
'cbostic': "@cbostic",
'dhruvibm': "@dhruvaraj",
'dkodihal': "@dkodihal",
'devenrao': "@devenrao",
'geissonator': "@andrewg",
'eddiejames': "@eajames",
'gtmills': "@gmills",
'jenkins-openbmc': "Jenkins",
'jk-ozlabs' : "@jk",
'mine260309': "@shyulei",
'msbarth': "@msbarth",
'mtritz': "@mtritz",
'ngorugan': "@ngorugan",
'navrathi' : "@navrathi",
'ojayanth': "@ojayanth",
'ratagupt': "@ratagupt",
'shenki': "@jms",
'spinler': "@spinler",
'tomjoseph83': "@tomjoseph",
},
}
project_map = {
'openbmc/witherspoon-pfault-analysis': ('spinler','Matt Spinler'),
'openbmc/phosphor-mrw-tools':('spinler','Matt Spinler'),
'openbmc/mboxbridge': ('amboar','Andrew Jeffery'),
'openbmc/obmc-console': ('jk-ozlabs','Jeremy Kerr'),
'openbmc/btbridge': ('jk-ozlabs','Jeremy Kerr'),
'openbmc/inarp': ('jk-ozlabs','Jeremy Kerr'),
'openbmc/phosphor-settingsd' :('dkodihal','Deepak Kodihalli'),
'openbmc/phosphor-logging' :('dkodihal','Deepak Kodihalli'),
'openbmc/openpower-vpd-parser': ('dkodihal','Deepak Kodihalli'),
'openbmc/phosphor-mboxd': ('amboar','Andrew Jeffery'),
'openbmc/openbmc': ('bradbishop','Brad Bishop'),
'openbmc/phosphor-host-ipmid': ('tomjoseph83','Tom Joseph')
}
def map_username(user):
return username_map[option_protocol].get(
user[0], "[{0}: {1}]".format(user[0].encode('utf-8'), user[1].encode('utf-8')))
def map_approvals(approvals, owner):
mapped = {}
for a in approvals:
approval_type = a['type']
approval_owner = (a['by']['username'], a['by'].get('name'))
approval_score = int(a['value'])
if approval_type not in mapped:
mapped[approval_type] = {}
# Don't allow the owner to self-+1 on code-reviews.
if approval_type == 'Code-Review' and approval_owner == owner and \
approval_score > 0:
continue
mapped[approval_type][approval_owner] = approval_score
return mapped
def map_reviewers(reviewers, owner):
mapped = []
for r in reviewers:
if 'username' in r:
reviewer_user = r['username']
else:
reviewer_user = "Anonymous-User"
if 'name' in r:
reviewer_name = r['name']
else:
reviewer_name = "Anonymous Coward"
if reviewer_user == 'jenkins-openbmc':
continue
reviewer_username = (reviewer_user, reviewer_name)
if reviewer_user == owner[0]:
continue
mapped.append(reviewer_username)
return mapped
def map_project_reviewer(project_name):
if project_map.get(project_name) is None:
return ('bradbishop','Brad Bishop')
return project_map.get(project_name)
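# (Added note) reason() below inspects a change's reviewers and approvals and
# returns a (message_template, people, dependency) tuple naming the next
# action: add reviewers, fix a failed verification, address review comments,
# collect a second +1, resolve an out-of-date or unmerged dependency, or merge.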
def reason(change):
subject = change['subject']
if change['owner'].get('name'):
real_name = change['owner'].get('name')
else:
real_name = change['owner']['username']
owner = (change['owner']['username'], real_name)
if 'allReviewers' in change:
reviewers = map_reviewers(change['allReviewers'], owner)
else:
reviewers = []
if 'approvals' in change['currentPatchSet']:
approvals = map_approvals(change['currentPatchSet']['approvals'], owner)
else:
approvals = {}
if len(reviewers) < 2:
return ("{0} has added insufficient reviewers.", [owner], None)
if ('Verified' in approvals):
verified = approvals['Verified']
scores = list(filter(lambda x: verified[x] < 0, verified))
if len(scores):
return ("{0} should resolve verification failure.", [owner], None)
if ('Code-Review' not in approvals):
return ("Missing code review by {0}.", reviewers, None)
reviewed = approvals['Code-Review']
rejected_by = list(filter(lambda x: reviewed[x] < 0, reviewed))
if len(rejected_by):
return ("{0} should resolve code review comments.", [owner], None)
reviewed_by = list(filter(lambda x: reviewed[x] > 0, reviewed))
if len(reviewed_by) < 2:
return ("Missing code review by {0}.",
set(reviewers) - set(reviewed_by), None)
if ('Verified' not in approvals):
return ("May be missing Jenkins verification ({0}).", [owner], None)
if ('dependsOn' in change) and (len(change['dependsOn'])):
for dep in change['dependsOn']:
if not dep['isCurrentPatchSet']:
return ("Depends on out of date patch set {1} ({0}).",
[owner], dep['id'])
dep_info = change_by_id(dep['id'])
if not dep_info:
continue
if dep_info['status'] != "MERGED":
return ("Depends on unmerged patch set {1} ({0}).",
[owner], dep['id'])
approved_by = list(filter(lambda x: reviewed[x] == 2, reviewed))
project_reviewer = map_project_reviewer(change['project'])
if len(approved_by):
return ("Ready for merge by {0}.", approved_by, None)
else:
return ("Awaiting merge review by {0}", [project_reviewer] , None)
send_to_slack = ['@andrewg',
'@anoo',
'@arj',
'@bradleyb',
'@cbostic',
'@devenrao',
'@dkodihal',
'@dhruvaraj',
'@eajames',
'@gmills',
'@jms',
'@jk',
'@msbarth',
'@mtritz',
'@navrathi',
'@ngorugan',
'@ojayanth',
'@ratagupt',
'@spinler',
'@tomjoseph',
'@v2cib530']
def do_report(args):
action_list = {}
stat_list = {}
oldest_action = {}
oldest_review = {}
for c in changes():
patchCreatedOn = c['currentPatchSet']['createdOn']
structTime = time.gmtime(patchCreatedOn)
timePatchCreatedOn = datetime(*structTime[:6])
timePatchCreatedOn -= timedelta(hours=5)
dCTM = datetime.now() - timePatchCreatedOn
print("{0} - {1}".format(c['url'], c['id']))
print(c['subject'].encode('utf-8'))
(r, people, dep) = reason(c)
people = ", ".join(map(map_username, people))
print(r.format(people, dep))
print("patch age:%s") % dCTM
print("----")
if "Depends on unmerged patch set" in r.format(people, dep):
continue
plist = people.split(",")
for p in plist:
p = p.strip()
message = "{0} - {1}".format(c['url'], c['id'].encode('utf-8'))
message = message + "\n" + c['subject'].encode('utf-8') + "\n" + r.format(people, dep)
message += "\npatch age:" + str(dCTM) + "\n----"
pattern = re.compile('bump version')
match_all = pattern.findall(c['subject'])
if match_all:
continue
action_list.setdefault(p, []).append(message)
if "Missing code review" in message:
if p not in oldest_action:
oldest_action.setdefault(p, []).append(patchCreatedOn)
oldest_action[p]= patchCreatedOn
elif oldest_action[p] > patchCreatedOn:
oldest_action[p] = patchCreatedOn
for slack_name, action_description in action_list.iteritems():
print "~~~~"
print slack_name
total_actions_message = "Number of Actions: %d" % len(action_description)
print total_actions_message
if option_ssm and slack_name in send_to_slack:
try:
slack.chat.post_message(slack_name, total_actions_message)
except Exception as e:
print slack_name + "hit exception:",
print e
review_count = 0
for description in action_description:
if slack_name in send_to_slack:
print description
if "Missing code review" in description:
review_count += 1
if option_ssm and slack_name in send_to_slack:
# print description
try:
slack.chat.post_message(slack_name, description)
except Exception as e:
print slack_name + "hit exception:",
print e
print "Number of Reviews: %d" % review_count
if slack_name in oldest_action:
structTime = time.gmtime(oldest_action[slack_name])
timePatchCreatedOn = datetime(*structTime[:6])
timePatchCreatedOn -= timedelta(hours=5)
dCTM = datetime.now() - timePatchCreatedOn
print "Oldest Action: %s" % dCTM
stat_list.setdefault(slack_name, []).append(review_count)
message = ""
for check_name in username_map['slack']:
slack_name = username_map['slack'][check_name]
if slack_name == 'Jenkins':
continue
if slack_name not in stat_list:
message = message + "%s has [0] reviews, oldest patch age:\n" % (slack_name)
sorted_stat_list = sorted(stat_list.items(), key=lambda x: (x[1],x[0]))
# sorted_stat_list.remove(('', [0]))
for s_name, cnt in sorted_stat_list:
if s_name in username_map['slack'].values():
dCTM = ""
if s_name in oldest_action:
structTime = time.gmtime(oldest_action[s_name])
timePatchCreatedOn = datetime(*structTime[:6])
timePatchCreatedOn -= timedelta(hours=5)
dCTM = datetime.now() - timePatchCreatedOn
message = message + "%s has %s reviews, oldest patch age: %s\n" % (s_name, cnt, dCTM)
print message
if option_stat:
print "sending stats to sprint_review_week channel"
slack.chat.post_message('#sprint_review_week',message)
# print "sending stats to openbmcdev channel"
# slack.chat.post_message('#openbmcdev',message)
parser = argparse.ArgumentParser()
parser.add_argument('--owner', help='Change owner', type=str,
action='append')
parser.add_argument('--protocol', help='Protocol for username conversion',
type=str, choices=(username_map.keys()))
parser.add_argument('-sm', action='store_true',help='send slack message flag')
parser.add_argument('-stat', action='store_true',help='send statistics to slack flag')
subparsers = parser.add_subparsers()
report = subparsers.add_parser('report', help='Generate report')
report.set_defaults(func=do_report)
args = parser.parse_args()
if ('owner' in args) and args.owner:
option_owner = " OR ".join(map(lambda x: "owner:" + x,
args.owner))
if 'protocol' in args and args.protocol:
option_protocol = args.protocol
if args.sm:
option_ssm = 'True'
print("will send messages to slack")
else:
    print("no slack messages will be sent")
if args.stat:
option_stat = 'True'
if 'func' in args:
args.func(args)
else:
parser.print_help()
| 8,870
| 0
| 207
|
969e299e36bacb90a9c16e54707bc80e4d9f750d
| 1,773
|
py
|
Python
|
rlscore/test/test_measure/test_cindex.py
|
vishalbelsare/RLScore
|
713f0a402f7a09e41a609f2ddcaf849b2021a0a7
|
[
"MIT"
] | 61
|
2015-03-06T08:48:01.000Z
|
2021-04-26T16:13:07.000Z
|
rlscore/test/test_measure/test_cindex.py
|
andrecamara/RLScore
|
713f0a402f7a09e41a609f2ddcaf849b2021a0a7
|
[
"MIT"
] | 5
|
2016-09-08T15:47:00.000Z
|
2019-02-25T17:44:55.000Z
|
rlscore/test/test_measure/test_cindex.py
|
vishalbelsare/RLScore
|
713f0a402f7a09e41a609f2ddcaf849b2021a0a7
|
[
"MIT"
] | 31
|
2015-01-28T15:05:33.000Z
|
2021-04-16T19:39:48.000Z
|
import numpy as np
import unittest
from rlscore.measure.cindex_measure import cindex
from rlscore.measure.measure_utilities import UndefinedPerformance
| 30.568966
| 66
| 0.56176
|
import numpy as np
import unittest
from rlscore.measure.cindex_measure import cindex
from rlscore.measure.measure_utilities import UndefinedPerformance
def slow_cindex(Y, P):
correct = Y
predictions = P
assert len(correct) == len(predictions)
disagreement = 0.
decisions = 0.
for i in range(len(correct)):
for j in range(len(correct)):
if correct[i] > correct[j]:
decisions += 1.
if predictions[i] < predictions[j]:
disagreement += 1.
elif predictions[i] == predictions[j]:
disagreement += 0.5
    #Disagreement error is not defined when there are no comparable pairs
    #(decisions == 0 would divide by zero)
disagreement /= decisions
return 1. - disagreement
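# Added worked example of the definition above (illustrative, not part of the
# original file): Y = [1, 2, 3], P = [0.1, 0.3, 0.2] has three ordered pairs
# with Y[i] > Y[j]; exactly one of them (Y=3 vs Y=2) is ranked the wrong way
# round, so slow_cindex(Y, P) == 1 - 1/3 ~= 0.667.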
class Test(unittest.TestCase):
def testCindex(self):
y = np.random.random(100)
p = np.random.random(100)
perf = cindex(y,p)
perf2 = slow_cindex(y,p)
self.assertAlmostEqual(perf, perf2)
y = np.random.random(10000)
p = np.ones(10000)
self.assertEqual(cindex(y,p), 0.5)
#9 pairs
y = np.array([1,2,3,3,4])
p = np.array([-4,1,5,5,7])
#0 inversions
self.assertEqual(cindex(y,p), 1.0)
#1 inversion
p = np.array([-4,1,8,5,7])
self.assertAlmostEqual(cindex(y,p), 8./9.)
#1.5 inversions
p = np.array([-4,1,8,7,7])
self.assertAlmostEqual(cindex(y,p), 7.5/9.)
#all wrong
p = np.array([10,9,8,7,6])
self.assertEqual(cindex(y,p), 0.)
#all tied
p = np.array([10,10,10,10,10])
self.assertEqual(cindex(y,p), 0.5)
self.assertRaises(UndefinedPerformance, cindex, p, p)
| 1,531
| 9
| 73
|
c1a33fc9fe9a26dbd52187a7e35bad49770d75e5
| 9,457
|
py
|
Python
|
pycat/base/event/window_event_manager.py
|
cmorace/pycat
|
7abc53f90a03b4961c10003eaca2c01efec9e4d2
|
[
"MIT"
] | null | null | null |
pycat/base/event/window_event_manager.py
|
cmorace/pycat
|
7abc53f90a03b4961c10003eaca2c01efec9e4d2
|
[
"MIT"
] | null | null | null |
pycat/base/event/window_event_manager.py
|
cmorace/pycat
|
7abc53f90a03b4961c10003eaca2c01efec9e4d2
|
[
"MIT"
] | null | null | null |
"""The window_event_manager module implements the WindowEventManager class."""
from typing import Callable, Dict, List, Set, Union
from pyglet.window import Window as PygletWindow
from pycat.base.event.key_event import KeyEvent
from pycat.base.event.mouse_event import MouseEvent
from pycat.geometry.point import Point
from pycat.debug.print import print_failure
from pycat.base.event.publisher import Subscriber, Publisher
from pycat.base.event.window_event_subscriber import WindowEventSubscriber
class WindowEventManager:
"""Manage pyglet window events.
- Adds support for multiple callbacks on window events.
- Tracks currently pressed keys and mouse position
- Simplifies window event callback function signatures
"""
def __init__(self, window: PygletWindow):
"""Instantiate new instance of WindowEventManager class.
:param window: the window whose events are to be managed
:type window: `pyglet.window.Window`
"""
self.__mouse_position = Point()
self.__mouse_delta = Point()
self.__mouse_scroll_delta = Point()
self.__active_keys: Set[Union[int, str]] = set()
self.__active_key: Union[int, str] = ""
self.__publishers: Dict[str, Publisher] = {
"on_key_press": Publisher[Callable[[KeyEvent], None]](),
"on_key_release": Publisher[Callable[[KeyEvent], None]](),
"on_mouse_drag": Publisher[Callable[[MouseEvent], None]](),
"on_mouse_enter": Publisher[Callable[[MouseEvent], None]](),
"on_mouse_leave": Publisher[Callable[[MouseEvent], None]](),
"on_mouse_motion": Publisher[Callable[[MouseEvent], None]](),
"on_mouse_press": Publisher[Callable[[MouseEvent], None]](),
"on_mouse_release": Publisher[Callable[[MouseEvent], None]](),
"on_mouse_scroll": Publisher[Callable[[MouseEvent], None]](),
}
window.on_key_press = self.__on_key_press
window.on_key_release = self.__on_key_release
window.on_mouse_drag = self.__on_mouse_drag
window.on_mouse_enter = self.__on_mouse_enter
window.on_mouse_leave = self.__on_mouse_leave
window.on_mouse_motion = self.__on_mouse_motion
window.on_mouse_press = self.__on_mouse_press
window.on_mouse_release = self.__on_mouse_release
window.on_mouse_scroll = self.__on_mouse_scroll
@property
def mouse_position(self) -> Point:
"""Return the current mouse position.
If the mouse has exited the window,
will return the last mouse position before exiting
:return: the current mouse position
:rtype: Point
"""
return self.__mouse_position
@property
    def mouse_delta(self) -> Point:
        """Return the most recent change in mouse position.
        If the mouse has exited the window,
        will return the last delta recorded before exiting
        :return: the last mouse movement delta
        :rtype: Point
"""
return self.__mouse_delta
    @property
def active_keys(self) -> Set[Union[int, str]]:
"""Return a set of the currently pressed keys.
Key codes constants are defined in `pycat.keyboard.KEY`
:return: set of currently pressed keys
:rtype: Set[int]
"""
return self.__active_keys
def add_subscribers(self, **kwargs: Union[Subscriber, List[Subscriber]]):
"""Add subscribers by event keyword."""
for key in kwargs:
if key in self.__publishers:
self.__publishers[key].add_subscribers(kwargs[key])
else:
self.__invalid_event_name(key)
def remove_subscribers(self, **kwargs: Union[Subscriber,
List[Subscriber]]):
"""Remove subscribers by event keyword."""
for key in kwargs:
if key in self.__publishers:
self.__publishers[key].remove_subscribers(kwargs[key])
else:
self.__invalid_event_name(key)
# Key Events
# ------------------------------------------------------------------------
# Mouse Events
# ------------------------------------------------------------------------
| 40.072034
| 78
| 0.639103
|
"""The window_event_manager module implements the WindowEventManager class."""
from typing import Callable, Dict, List, Set, Union
from pyglet.window import Window as PygletWindow
from pycat.base.event.key_event import KeyEvent
from pycat.base.event.mouse_event import MouseEvent
from pycat.geometry.point import Point
from pycat.debug.print import print_failure
from pycat.base.event.publisher import Subscriber, Publisher
from pycat.base.event.window_event_subscriber import WindowEventSubscriber
class WindowEventManager:
"""Manage pyglet window events.
- Adds support for multiple callbacks on window events.
- Tracks currently pressed keys and mouse position
- Simplifies window event callback function signatures
"""
def __init__(self, window: PygletWindow):
"""Instantiate new instance of WindowEventManager class.
:param window: the window whose events are to be managed
:type window: `pyglet.window.Window`
"""
self.__mouse_position = Point()
self.__mouse_delta = Point()
self.__mouse_scroll_delta = Point()
self.__active_keys: Set[Union[int, str]] = set()
self.__active_key: Union[int, str] = ""
self.__publishers: Dict[str, Publisher] = {
"on_key_press": Publisher[Callable[[KeyEvent], None]](),
"on_key_release": Publisher[Callable[[KeyEvent], None]](),
"on_mouse_drag": Publisher[Callable[[MouseEvent], None]](),
"on_mouse_enter": Publisher[Callable[[MouseEvent], None]](),
"on_mouse_leave": Publisher[Callable[[MouseEvent], None]](),
"on_mouse_motion": Publisher[Callable[[MouseEvent], None]](),
"on_mouse_press": Publisher[Callable[[MouseEvent], None]](),
"on_mouse_release": Publisher[Callable[[MouseEvent], None]](),
"on_mouse_scroll": Publisher[Callable[[MouseEvent], None]](),
}
window.on_key_press = self.__on_key_press
window.on_key_release = self.__on_key_release
window.on_mouse_drag = self.__on_mouse_drag
window.on_mouse_enter = self.__on_mouse_enter
window.on_mouse_leave = self.__on_mouse_leave
window.on_mouse_motion = self.__on_mouse_motion
window.on_mouse_press = self.__on_mouse_press
window.on_mouse_release = self.__on_mouse_release
window.on_mouse_scroll = self.__on_mouse_scroll
@property
def mouse_position(self) -> Point:
"""Return the current mouse position.
If the mouse has exited the window,
will return the last mouse position before exiting
:return: the current mouse position
:rtype: Point
"""
return self.__mouse_position
@property
    def mouse_delta(self) -> Point:
        """Return the most recent change in mouse position.
        If the mouse has exited the window,
        will return the last delta recorded before exiting
        :return: the last mouse movement delta
        :rtype: Point
"""
return self.__mouse_delta
@property
def active_key(self) -> Union[int, str]:
return self.__active_key
@property
def active_keys(self) -> Set[Union[int, str]]:
"""Return a set of the currently pressed keys.
Key codes constants are defined in `pycat.keyboard.KEY`
:return: set of currently pressed keys
:rtype: Set[int]
"""
return self.__active_keys
def add_window_event_subscriber(self,
subscriber: WindowEventSubscriber):
self.__publishers["on_key_press"].add_subscribers(
subscriber.on_key_press)
self.__publishers["on_key_release"].add_subscribers(
subscriber.on_key_release)
self.__publishers["on_mouse_drag"].add_subscribers(
subscriber.on_mouse_drag)
self.__publishers["on_mouse_enter"].add_subscribers(
subscriber.on_mouse_enter)
self.__publishers["on_mouse_leave"].add_subscribers(
subscriber.on_mouse_leave)
self.__publishers["on_mouse_motion"].add_subscribers(
subscriber.on_mouse_motion)
self.__publishers["on_mouse_press"].add_subscribers(
subscriber.on_mouse_press)
self.__publishers["on_mouse_release"].add_subscribers(
subscriber.on_mouse_release)
self.__publishers["on_mouse_scroll"].add_subscribers(
subscriber.on_mouse_scroll)
def remove_window_event_subscriber(self,
subscriber: WindowEventSubscriber):
self.__publishers["on_key_press"].remove_subscribers(
subscriber.on_key_press)
self.__publishers["on_key_release"].remove_subscribers(
subscriber.on_key_release)
self.__publishers["on_mouse_drag"].remove_subscribers(
subscriber.on_mouse_drag)
self.__publishers["on_mouse_enter"].remove_subscribers(
subscriber.on_mouse_enter)
self.__publishers["on_mouse_leave"].remove_subscribers(
subscriber.on_mouse_leave)
self.__publishers["on_mouse_motion"].remove_subscribers(
subscriber.on_mouse_motion)
self.__publishers["on_mouse_press"].remove_subscribers(
subscriber.on_mouse_press)
self.__publishers["on_mouse_release"].remove_subscribers(
subscriber.on_mouse_release)
self.__publishers["on_mouse_scroll"].remove_subscribers(
subscriber.on_mouse_scroll)
def add_subscribers(self, **kwargs: Union[Subscriber, List[Subscriber]]):
"""Add subscribers by event keyword."""
for key in kwargs:
if key in self.__publishers:
self.__publishers[key].add_subscribers(kwargs[key])
else:
self.__invalid_event_name(key)
def remove_subscribers(self, **kwargs: Union[Subscriber,
List[Subscriber]]):
"""Remove subscribers by event keyword."""
for key in kwargs:
if key in self.__publishers:
self.__publishers[key].remove_subscribers(kwargs[key])
else:
self.__invalid_event_name(key)
def __invalid_event_name(self, name: str):
msg = str(name) + " is an invalid window event name."
print_failure(msg)
assert name in self.__publishers # throw an assertion error
def _update_publishers(self):
for publisher in self.__publishers.values():
publisher.update()
# Key Events
# ------------------------------------------------------------------------
def __on_key_press(self, symbol: int, mod: int):
self._update_publishers()
e = KeyEvent(symbol, mod)
if e.character:
self.__active_key = e.character
else:
self.__active_key = e.symbol
self.__publishers["on_key_press"].publish(e)
self._update_publishers()
def __on_key_release(self, symbol: int, mod: int):
self._update_publishers()
e = KeyEvent(symbol, mod)
if e.character == self.__active_key or e.symbol == self.__active_key:
self.__active_key = ""
self.__publishers["on_key_release"].publish(e)
self._update_publishers()
# Mouse Events
# ------------------------------------------------------------------------
def __on_mouse_drag(self, x: int, y: int, dx: int, dy: int, b: int,
m: int):
self._update_publishers()
self.__mouse_position.set(x, y)
self.__mouse_delta.set(dx, dy)
event = MouseEvent(x, y, dx, dy, b, m)
self.__publishers["on_mouse_drag"].publish(event)
self._update_publishers()
def __on_mouse_enter(self, x: int, y: int):
self._update_publishers()
self.__mouse_position.set(x, y)
event = MouseEvent(x, y)
self.__publishers["on_mouse_enter"].publish(event)
self._update_publishers()
def __on_mouse_leave(self, x: int, y: int):
self._update_publishers()
self.__mouse_position.set(x, y)
event = MouseEvent(x, y)
self.__publishers["on_mouse_leave"].publish(event)
self._update_publishers()
def __on_mouse_motion(self, x: int, y: int, dx: int, dy: int):
self._update_publishers()
self.__mouse_position.set(x, y)
self.__mouse_delta.set(dx, dy)
event = MouseEvent(x, y, dx, dy)
self.__publishers["on_mouse_motion"].publish(event)
self._update_publishers()
def __on_mouse_press(self, x: int, y: int, b: int, m: int):
self._update_publishers()
self.__mouse_position.set(x, y)
event = MouseEvent(x, y, 0, 0, b, m)
self.__publishers["on_mouse_press"].publish(event)
self._update_publishers()
def __on_mouse_release(self, x: int, y: int, b: int, m: int):
self._update_publishers()
self.__mouse_position.set(x, y)
event = MouseEvent(x, y, 0, 0, b, m)
self.__publishers["on_mouse_release"].publish(event)
self._update_publishers()
def __on_mouse_scroll(self, x: int, y: int, dx: int, dy: int):
self._update_publishers()
self.__mouse_position.set(x, y)
self.__mouse_scroll_delta.set(dx, dy)
event = MouseEvent(x, y, dx, dy)
self.__publishers["on_mouse_scroll"].publish(event)
self._update_publishers()
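# --- Added illustrative usage sketch (not part of the module) --------------
# Assumes a pyglet event loop; window size and the print callbacks are
# arbitrary choices made for this example.
if __name__ == "__main__":
    import pyglet
    window = PygletWindow(width=640, height=480)
    events = WindowEventManager(window)
    events.add_subscribers(
        on_key_press=lambda e: print("key pressed:", e.symbol),
        on_mouse_press=[lambda e: print("mouse pressed:", e)],
    )
    pyglet.app.run()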
| 4,804
| 0
| 375
|
40bf95dfded92821b08f049465563ccb6b9628be
| 949
|
py
|
Python
|
itrack/urls.py
|
AmlHanfy/iTrack-Project
|
6afe64ff3bd78c6c9dc93d68c0ed52708a8dcb1a
|
[
"Apache-2.0"
] | null | null | null |
itrack/urls.py
|
AmlHanfy/iTrack-Project
|
6afe64ff3bd78c6c9dc93d68c0ed52708a8dcb1a
|
[
"Apache-2.0"
] | null | null | null |
itrack/urls.py
|
AmlHanfy/iTrack-Project
|
6afe64ff3bd78c6c9dc93d68c0ed52708a8dcb1a
|
[
"Apache-2.0"
] | 3
|
2018-01-23T19:08:22.000Z
|
2018-09-25T06:47:24.000Z
|
'''
this module contains all the urls for the whole project
'''
from django.contrib import admin
from django.urls import path,include
from article import views as articleViews
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf import settings
from django.conf.urls.static import static
# from django.views.generic import RedirectView
urlpatterns = [
path('admin/', admin.site.urls),
path('article/', include('article.urls')),
path('auth/', include('social_django.urls', namespace='social')),
path('watch/', include('watch_course.urls', namespace='watch')),
# path('', RedirectView.as_view(url='/questionnaire/')),
path('', include('questionnaire.urls',namespace='questionnaire')),
path('users/', include('users.urls',namespace='user')),
]
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 36.5
| 77
| 0.72392
|
'''
this module contains all the urls for the whole project
'''
from django.contrib import admin
from django.urls import path,include
from article import views as articleViews
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf import settings
from django.conf.urls.static import static
# from django.views.generic import RedirectView
urlpatterns = [
path('admin/', admin.site.urls),
path('article/', include('article.urls')),
path('auth/', include('social_django.urls', namespace='social')),
path('watch/', include('watch_course.urls', namespace='watch')),
# path('', RedirectView.as_view(url='/questionnaire/')),
path('', include('questionnaire.urls',namespace='questionnaire')),
path('users/', include('users.urls',namespace='user')),
]
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 0
| 0
| 0
|
962ccfa4e838d3a0b196a2a719fdce3234b0b6c1
| 764
|
py
|
Python
|
stringsetup.py
|
TeamDragons/Dragons-Userbot
|
e40584f4dc898b785523adea4519ffb412dc92c4
|
[
"MIT"
] | 3
|
2021-08-11T08:33:39.000Z
|
2022-01-17T00:09:11.000Z
|
stringsetup.py
|
sophiashirashaki/Dragons-Userbot
|
2255c18428e488a267eaec1ec8c081fabcde167a
|
[
"MIT"
] | null | null | null |
stringsetup.py
|
sophiashirashaki/Dragons-Userbot
|
2255c18428e488a267eaec1ec8c081fabcde167a
|
[
"MIT"
] | 6
|
2021-08-14T08:20:41.000Z
|
2022-03-20T02:31:45.000Z
|
#!/usr/bin/env python3
# (c) https://t.me/TelethonChat/37677
# This Source Code Form is subject to the terms of the GNU
# MIT TeamDragons If a copy of the developer was not distributed with this
# file, You can obtain one at https://www.gnu.org/licenses/MIT/TeamDragons
from telethon.sessions import StringSession
from telethon.sync import TelegramClient
print(
"""Please go-to my.telegram.org
Login using your Telegram account
Click on API Development Tools
Create a new application, by entering the required details"""
)
APP_ID = int(input("MASUKAN API KEY : "))
API_HASH = input("MASUKAN API HASH : ")
with TelegramClient(StringSession(), APP_ID, API_HASH) as client:
print(client.session.save())
client.send_message("me", client.session.save())
| 34.727273
| 74
| 0.755236
|
#!/usr/bin/env python3
# (c) https://t.me/TelethonChat/37677
# This Source Code Form is subject to the terms of the GNU
# MIT TeamDragons If a copy of the developer was not distributed with this
# file, You can obtain one at https://www.gnu.org/licenses/MIT/TeamDragons
from telethon.sessions import StringSession
from telethon.sync import TelegramClient
print(
"""Please go-to my.telegram.org
Login using your Telegram account
Click on API Development Tools
Create a new application, by entering the required details"""
)
APP_ID = int(input("MASUKAN API KEY : "))
API_HASH = input("MASUKAN API HASH : ")
with TelegramClient(StringSession(), APP_ID, API_HASH) as client:
print(client.session.save())
client.send_message("me", client.session.save())
| 0
| 0
| 0
|
8c0aba7c3f75f22e54bc31dcc941aa754266bf3a
| 1,650
|
py
|
Python
|
permutation.py
|
pranaylobo/Team-Kalm-CP
|
daa967d84ccd162efc0b7f19448daa01f745e7e2
|
[
"Apache-2.0"
] | null | null | null |
permutation.py
|
pranaylobo/Team-Kalm-CP
|
daa967d84ccd162efc0b7f19448daa01f745e7e2
|
[
"Apache-2.0"
] | 1
|
2021-02-15T16:02:47.000Z
|
2021-02-15T16:02:47.000Z
|
permutation.py
|
pranaylobo/Team-Kalm-CP
|
daa967d84ccd162efc0b7f19448daa01f745e7e2
|
[
"Apache-2.0"
] | 1
|
2021-02-16T04:53:47.000Z
|
2021-02-16T04:53:47.000Z
|
node=[1,2,3]
key=0
current_level={}
node_store=[]
main(node,key,current_level,node_store)
| 21.710526
| 118
| 0.565455
|
def branches(node,current_level,key,node_store):
temp_current_level = current_level.copy()
# print(node_store)
for index,num in enumerate(node):
current_root = temp_current_level[num].copy()
current_leafnode = current_root[0].get(num).copy()
# print(current_leafnode)
if len(current_leafnode) == 1:
# print(node_store,current_level)
for k,v in current_level.items():
for k1,v1 in v[0].items():
print(node_store[0] ,k, *v1)
break
else:
print(num)
node_store.insert(0,num)
key=0
current_level={}
main(current_leafnode,key,current_level,node_store)
def main(node,key,current_level,node_store):
#level0
if key == len(node):
key=0
# print("First Level",current_level)
branches(node,current_level,key,node_store)
else:
copy_node=node.copy()
poped=node.pop(key) #pop root from the array
# print("Poped",poped)
root_level=[]
temp=[]
for i in range(0,len(node)):
root_level.append(*[node[i]])
# temp.append(node[i])
# current_level.update({"root"+str(copy_node[key]):copy_node[key],"leafnodes"+str(copy_node[key]):root_level})
current_level.update({copy_node[key]:[{copy_node[key]:root_level}]})
main(copy_node,key+1,current_level,node_store)
node=[1,2,3]
key=0
current_level={}
node_store=[]
main(node,key,current_level,node_store)
| 1,501
| 0
| 46
|
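The recursive branches/main pair in the record above prints permutations of the input list; a compact standard-library cross-check of the same idea (not part of the original file) is:

from itertools import permutations

# itertools.permutations yields every ordering of the input, here 3! = 6 tuples
for perm in permutations([1, 2, 3]):
    print(*perm)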
943f941b685bb98bbee64264564ff9847ddca2c1
| 8,005
|
py
|
Python
|
scripts/alvar_marker_to_baxter_picking_pose.py
|
birlrobotics/birl_kitting_experiment
|
75cf42b6fd187b12e99b0e916c73058f429a556c
|
[
"BSD-3-Clause"
] | 2
|
2019-06-03T03:33:50.000Z
|
2019-12-30T05:43:34.000Z
|
scripts/alvar_marker_to_baxter_picking_pose.py
|
birlrobotics/birl_kitting_experiment
|
75cf42b6fd187b12e99b0e916c73058f429a556c
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/alvar_marker_to_baxter_picking_pose.py
|
birlrobotics/birl_kitting_experiment
|
75cf42b6fd187b12e99b0e916c73058f429a556c
|
[
"BSD-3-Clause"
] | 1
|
2019-12-30T05:43:35.000Z
|
2019-12-30T05:43:35.000Z
|
#!/usr/bin/env python
import rospy
from ar_track_alvar_msgs.msg import AlvarMarkers
import tf
from tf.transformations import (
translation_matrix,
quaternion_matrix,
translation_from_matrix,
quaternion_from_matrix,
)
import threading
import copy
import numpy
import ipdb
comfortable_pick_frame = numpy.matrix([[0.9270661704094123, 0.37483076461918924, 0.007086153923013515, 0.746], [0.37479879099742724, -0.9270901506257341, 0.0054515025104288975, -0.089], [0.008612894362151227, -0.002398021632153474, -0.9999600329727978, 0.115], [0.0, 0.0, 0.0, 1.0]])
to_confortable_pick_frame = comfortable_pick_frame[:3, :3].I
handcoded_marker_compensation = {
0: numpy.array(
[[0.04258225549353267, -0.9976699860199164, -0.05330431982591899, -0.003], [0.9979499876999266, 0.045024270145620604, -0.045482272893637384, 0.0], [0.04777628665771998, -0.05125830754984532, 0.9975419852519115, -0.033], [0.0, 0.0, 0.0, 1.0]]
, dtype=numpy.float64),
4: numpy.array(
((1.0, 0.0, 0.0, -0.0),
(0.0, 1.0, 0.0, -0.0),
(0.0, 0.0, 1.0, 0.05),
(0.0, 0.0, 0.0, 1.0))
, dtype=numpy.float64),
6: numpy.array(
((1.0, 0.0, 0.0, -0.0),
(0.0, 1.0, 0.0, -0.0),
(0.0, 0.0, 1.0, 0.05),
(0.0, 0.0, 0.0, 1.0))
, dtype=numpy.float64),
8: numpy.array(
[[0.04157746411335095, -0.9972777195581611, -0.06089716373343029, 0.0], [0.9963386576809764, 0.04594110483626879, -0.07210197013183828, -0.011], [0.07470337133203928, -0.0576763812950862, 0.9955364590773818, -0.028], [0.0, 0.0, 0.0, 1.0]]
, dtype=numpy.float64),
11: numpy.array((
((1.0, 0.0, 0.0, 0.016),
(0.0, 1.0, 0.0, -0.021),
(0.0, 0.0, 1.0, 0.029),
(0.0, 0.0, 0.0, 1.0))
), dtype=numpy.float64),
13: numpy.array((
[[0.09622387309747282, -0.9940084038491953, -0.05184842643028589, 0.004], [0.9940084038491953, 0.09867616353674313, -0.04701391099286735, 0.0], [0.05184842643028589, -0.04701391099286735, 0.9975477095607297, -0.017], [0.0, 0.0, 0.0, 1.0]]
), dtype=numpy.float64),
17: numpy.array((
((1.0, 0.0, 0.0, 0.0),
(0.0, 1.0, 0.0, -0.0),
(0.0, 0.0, 1.0, 0.075),
(0.0, 0.0, 0.0, 1.0))
), dtype=numpy.float64),
18: numpy.array((
[[-0.07792074091621037, 0.9962448437817034, -0.037743467957233004, 0.009],
[-0.9936061393855619, -0.07450042029363613, 0.08483234731746714, -0.027],
[0.081701884374772, 0.044112340840647246, 0.9956801210605593, -0.020],
[0.0, 0.0, 0.0, 1.0]]
), dtype=numpy.float64),
20: numpy.array((
[[0.06595220199306795, -0.9974408579981445, -0.02760510547328114, 0.009],
[0.9958555310795135, 0.06406061873788338, 0.06456003675076039, 0.016],
[-0.06262641831212726, -0.031748573556066424, 0.9975319342289496, -0.040],
[0.0, 0.0, 0.0, 1.0]]
), dtype=numpy.float64),
22: numpy.array((
((1.0,0.0,0.0,0.0),
(0.0, 1.0, 0.0, 0.016),
(0.0 , 0.0 ,1.0, 0.016),
(0.0, 0.0, 0.0, 1.0))
), dtype=numpy.float64),
24: numpy.array((
[[0.06595220199306795, -0.9974408579981445, -0.02760510547328114, 0.009],
[0.9958555310795135, 0.06406061873788338, 0.06456003675076039, 0.016],
[-0.06262641831212726, -0.031748573556066424, 0.9975319342289496, -0.06],
[0.0, 0.0, 0.0, 1.0]]
), dtype=numpy.float64),
}
writable = threading.Event()
writable.clear()
shared_msg = None
if __name__ == '__main__':
rospy.init_node("alvar_marker_to_baxter_picking_pose_py")
rospy.Subscriber("ar_pose_marker", AlvarMarkers, cb)
writable.set()
listener = tf.TransformListener()
broadcaster = tf.TransformBroadcaster()
pub = rospy.Publisher("baxter_available_picking_pose", AlvarMarkers, queue_size=10)
r = rospy.Rate(10)
while not rospy.is_shutdown():
writable.clear()
msg = copy.deepcopy(shared_msg)
writable.set()
if msg is not None:
look_up_t = rospy.Time(0)
listener.waitForTransform('base', 'left_hand_camera', look_up_t, rospy.Duration(3))
base_to_cam = listener.lookupTransform('base', 'left_hand_camera', look_up_t)
base_to_cam_mat = listener.fromTranslationRotation(*base_to_cam)
for marker in msg.markers:
pose = marker.pose.pose
pos = pose.position
ori = pose.orientation
cam_to_marker_mat = numpy.dot(translation_matrix((pos.x, pos.y, pos.z)), quaternion_matrix((ori.x, ori.y, ori.z, ori.w)))
base_to_marker = numpy.dot(base_to_cam_mat, cam_to_marker_mat)
broadcaster.sendTransform(
translation_from_matrix(base_to_marker),
quaternion_from_matrix(base_to_marker),
rospy.Time.now(),
'raw_marker_%s'%marker.id,
'base',
)
flipped_mat = transform_into_baxter_picking_space(base_to_marker)
trans = translation_from_matrix(flipped_mat)
quat = quaternion_from_matrix(flipped_mat)
broadcaster.sendTransform(
trans,
quat,
rospy.Time.now(),
'flipped_%s'%marker.id,
'base',
)
if marker.id in handcoded_marker_compensation:
compensated_mat = numpy.dot(flipped_mat, handcoded_marker_compensation[marker.id])
trans = translation_from_matrix(compensated_mat)
quat = quaternion_from_matrix(compensated_mat)
broadcaster.sendTransform(
trans,
quat,
rospy.Time.now(),
'compensated_%s'%marker.id,
'base',
)
noisy_mat = add_noise(compensated_mat)
trans = translation_from_matrix(noisy_mat)
quat = quaternion_from_matrix(noisy_mat)
broadcaster.sendTransform(
trans,
quat,
rospy.Time.now(),
'baxter_picking_pose_%s'%marker.id,
'base',
)
marker.pose.pose.position.x = trans[0]
marker.pose.pose.position.y = trans[1]
marker.pose.pose.position.z = trans[2]
marker.pose.pose.orientation.x = quat[0]
marker.pose.pose.orientation.y = quat[1]
marker.pose.pose.orientation.z = quat[2]
marker.pose.pose.orientation.w = quat[3]
if len(msg.markers) != 0:
pub.publish(msg)
try:
r.sleep()
except rospy.ROSInterruptException:
break
| 35.577778
| 283
| 0.566396
|
#!/usr/bin/env python
import rospy
from ar_track_alvar_msgs.msg import AlvarMarkers
import tf
from tf.transformations import (
translation_matrix,
quaternion_matrix,
translation_from_matrix,
quaternion_from_matrix,
)
import threading
import copy
import numpy
import ipdb
comfortable_pick_frame = numpy.matrix([[0.9270661704094123, 0.37483076461918924, 0.007086153923013515, 0.746], [0.37479879099742724, -0.9270901506257341, 0.0054515025104288975, -0.089], [0.008612894362151227, -0.002398021632153474, -0.9999600329727978, 0.115], [0.0, 0.0, 0.0, 1.0]])
to_confortable_pick_frame = comfortable_pick_frame[:3, :3].I
handcoded_marker_compensation = {
0: numpy.array(
[[0.04258225549353267, -0.9976699860199164, -0.05330431982591899, -0.003], [0.9979499876999266, 0.045024270145620604, -0.045482272893637384, 0.0], [0.04777628665771998, -0.05125830754984532, 0.9975419852519115, -0.033], [0.0, 0.0, 0.0, 1.0]]
, dtype=numpy.float64),
4: numpy.array(
((1.0, 0.0, 0.0, -0.0),
(0.0, 1.0, 0.0, -0.0),
(0.0, 0.0, 1.0, 0.05),
(0.0, 0.0, 0.0, 1.0))
, dtype=numpy.float64),
6: numpy.array(
((1.0, 0.0, 0.0, -0.0),
(0.0, 1.0, 0.0, -0.0),
(0.0, 0.0, 1.0, 0.05),
(0.0, 0.0, 0.0, 1.0))
, dtype=numpy.float64),
8: numpy.array(
[[0.04157746411335095, -0.9972777195581611, -0.06089716373343029, 0.0], [0.9963386576809764, 0.04594110483626879, -0.07210197013183828, -0.011], [0.07470337133203928, -0.0576763812950862, 0.9955364590773818, -0.028], [0.0, 0.0, 0.0, 1.0]]
, dtype=numpy.float64),
11: numpy.array((
((1.0, 0.0, 0.0, 0.016),
(0.0, 1.0, 0.0, -0.021),
(0.0, 0.0, 1.0, 0.029),
(0.0, 0.0, 0.0, 1.0))
), dtype=numpy.float64),
13: numpy.array((
[[0.09622387309747282, -0.9940084038491953, -0.05184842643028589, 0.004], [0.9940084038491953, 0.09867616353674313, -0.04701391099286735, 0.0], [0.05184842643028589, -0.04701391099286735, 0.9975477095607297, -0.017], [0.0, 0.0, 0.0, 1.0]]
), dtype=numpy.float64),
17: numpy.array((
((1.0, 0.0, 0.0, 0.0),
(0.0, 1.0, 0.0, -0.0),
(0.0, 0.0, 1.0, 0.075),
(0.0, 0.0, 0.0, 1.0))
), dtype=numpy.float64),
18: numpy.array((
[[-0.07792074091621037, 0.9962448437817034, -0.037743467957233004, 0.009],
[-0.9936061393855619, -0.07450042029363613, 0.08483234731746714, -0.027],
[0.081701884374772, 0.044112340840647246, 0.9956801210605593, -0.020],
[0.0, 0.0, 0.0, 1.0]]
), dtype=numpy.float64),
20: numpy.array((
[[0.06595220199306795, -0.9974408579981445, -0.02760510547328114, 0.009],
[0.9958555310795135, 0.06406061873788338, 0.06456003675076039, 0.016],
[-0.06262641831212726, -0.031748573556066424, 0.9975319342289496, -0.040],
[0.0, 0.0, 0.0, 1.0]]
), dtype=numpy.float64),
22: numpy.array((
((1.0,0.0,0.0,0.0),
(0.0, 1.0, 0.0, 0.016),
(0.0 , 0.0 ,1.0, 0.016),
(0.0, 0.0, 0.0, 1.0))
), dtype=numpy.float64),
24: numpy.array((
[[0.06595220199306795, -0.9974408579981445, -0.02760510547328114, 0.009],
[0.9958555310795135, 0.06406061873788338, 0.06456003675076039, 0.016],
[-0.06262641831212726, -0.031748573556066424, 0.9975319342289496, -0.06],
[0.0, 0.0, 0.0, 1.0]]
), dtype=numpy.float64),
}
writable = threading.Event()
writable.clear()
shared_msg = None
def cb(msg):
global writable, shared_msg
if writable.is_set():
shared_msg = msg
def transform_into_baxter_picking_space(mat):
# Determine x, y, z by directions
for axis in range(3):
swap_with = abs(mat[:3, axis]).argmax()
if swap_with != axis:
tmp = mat[:3, swap_with].copy()
mat[:3, swap_with] = mat[:3, axis]
mat[:3, axis] = tmp
# If z is pointing upwards, flip it
if mat[:3, 2][2] > 0:
mat[:3, 2] = -mat[:3, 2]
# If x is pointing inwards, flip it
vec_x = mat[:3, 0].reshape((3, -1))
if (to_confortable_pick_frame*vec_x)[0] < 0:
mat[:3, 0] = -mat[:3, 0]
mat[:3, 1] = -mat[:3, 1]
# Make sure x, y, z subject to right-hand rule
if numpy.cross(mat[:3, 0], mat[:3, 1])[2] > 0:
mat[:3, 1] = -mat[:3, 1]
return mat
def add_noise(mat):
#mat[0, 3] += numpy.random.normal(0, 0.02)
#mat[1, 3] += numpy.random.normal(0, 0.02)
#mat[2, 3] += numpy.random.normal(0, 0.02)
return mat
if __name__ == '__main__':
rospy.init_node("alvar_marker_to_baxter_picking_pose_py")
rospy.Subscriber("ar_pose_marker", AlvarMarkers, cb)
writable.set()
listener = tf.TransformListener()
broadcaster = tf.TransformBroadcaster()
pub = rospy.Publisher("baxter_available_picking_pose", AlvarMarkers, queue_size=10)
r = rospy.Rate(10)
while not rospy.is_shutdown():
writable.clear()
msg = copy.deepcopy(shared_msg)
writable.set()
if msg is not None:
look_up_t = rospy.Time(0)
listener.waitForTransform('base', 'left_hand_camera', look_up_t, rospy.Duration(3))
base_to_cam = listener.lookupTransform('base', 'left_hand_camera', look_up_t)
base_to_cam_mat = listener.fromTranslationRotation(*base_to_cam)
for marker in msg.markers:
pose = marker.pose.pose
pos = pose.position
ori = pose.orientation
cam_to_marker_mat = numpy.dot(translation_matrix((pos.x, pos.y, pos.z)), quaternion_matrix((ori.x, ori.y, ori.z, ori.w)))
base_to_marker = numpy.dot(base_to_cam_mat, cam_to_marker_mat)
broadcaster.sendTransform(
translation_from_matrix(base_to_marker),
quaternion_from_matrix(base_to_marker),
rospy.Time.now(),
'raw_marker_%s'%marker.id,
'base',
)
flipped_mat = transform_into_baxter_picking_space(base_to_marker)
trans = translation_from_matrix(flipped_mat)
quat = quaternion_from_matrix(flipped_mat)
broadcaster.sendTransform(
trans,
quat,
rospy.Time.now(),
'flipped_%s'%marker.id,
'base',
)
if marker.id in handcoded_marker_compensation:
compensated_mat = numpy.dot(flipped_mat, handcoded_marker_compensation[marker.id])
trans = translation_from_matrix(compensated_mat)
quat = quaternion_from_matrix(compensated_mat)
broadcaster.sendTransform(
trans,
quat,
rospy.Time.now(),
'compensated_%s'%marker.id,
'base',
)
noisy_mat = add_noise(compensated_mat)
trans = translation_from_matrix(noisy_mat)
quat = quaternion_from_matrix(noisy_mat)
broadcaster.sendTransform(
trans,
quat,
rospy.Time.now(),
'baxter_picking_pose_%s'%marker.id,
'base',
)
marker.pose.pose.position.x = trans[0]
marker.pose.pose.position.y = trans[1]
marker.pose.pose.position.z = trans[2]
marker.pose.pose.orientation.x = quat[0]
marker.pose.pose.orientation.y = quat[1]
marker.pose.pose.orientation.z = quat[2]
marker.pose.pose.orientation.w = quat[3]
if len(msg.markers) != 0:
pub.publish(msg)
try:
r.sleep()
except rospy.ROSInterruptException:
break
| 967
| 0
| 69
|
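transform_into_baxter_picking_space in the record above enforces a right-handed, downward-pointing frame by checking the cross product of the first two columns; a small standalone sketch of that check with illustrative axis values (not taken from the node) is:

import numpy as np

# Columns of a candidate rotation matrix: the x and y axes of the frame
x_axis = np.array([1.0, 0.0, 0.0])
y_axis = np.array([0.0, 1.0, 0.0])

# For the convention used above, a positive z component of cross(x, y)
# means the y axis has to be flipped to keep the frame right-handed.
if np.cross(x_axis, y_axis)[2] > 0:
    y_axis = -y_axis
print(y_axis)  # -> [ 0. -1.  0.]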
38f856dac8ab642dc811a76eee8077f057643a7b
| 1,600
|
py
|
Python
|
nova/api/openstack/compute/schemas/hypervisors.py
|
teresa-ho/stx-nova
|
1f82323439da2449edbbaed2fe1c8414a550c86f
|
[
"Apache-2.0"
] | null | null | null |
nova/api/openstack/compute/schemas/hypervisors.py
|
teresa-ho/stx-nova
|
1f82323439da2449edbbaed2fe1c8414a550c86f
|
[
"Apache-2.0"
] | 2
|
2015-02-03T06:25:24.000Z
|
2015-02-04T10:10:36.000Z
|
nova/api/openstack/compute/schemas/hypervisors.py
|
teresa-ho/stx-nova
|
1f82323439da2449edbbaed2fe1c8414a550c86f
|
[
"Apache-2.0"
] | 7
|
2015-01-20T10:30:08.000Z
|
2020-02-05T10:29:05.000Z
|
# Copyright 2017 Huawei Technologies Co.,LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.validation import parameter_types
list_query_schema_v253 = {
'type': 'object',
'properties': {
# The 2.33 microversion added support for paging by limit and marker.
'limit': parameter_types.single_param(
parameter_types.non_negative_integer),
'marker': parameter_types.single_param({'type': 'string'}),
# The 2.53 microversion adds support for filtering by hostname pattern
# and requesting hosted servers in the GET /os-hypervisors and
# GET /os-hypervisors/detail response.
'hypervisor_hostname_pattern': parameter_types.single_param(
parameter_types.hostname),
'with_servers': parameter_types.single_param(
parameter_types.boolean)
},
'additionalProperties': False
}
show_query_schema_v253 = {
'type': 'object',
'properties': {
'with_servers': parameter_types.single_param(
parameter_types.boolean)
},
'additionalProperties': False
}
| 36.363636
| 78
| 0.70625
|
# Copyright 2017 Huawei Technologies Co.,LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.validation import parameter_types
list_query_schema_v253 = {
'type': 'object',
'properties': {
# The 2.33 microversion added support for paging by limit and marker.
'limit': parameter_types.single_param(
parameter_types.non_negative_integer),
'marker': parameter_types.single_param({'type': 'string'}),
# The 2.53 microversion adds support for filtering by hostname pattern
# and requesting hosted servers in the GET /os-hypervisors and
# GET /os-hypervisors/detail response.
'hypervisor_hostname_pattern': parameter_types.single_param(
parameter_types.hostname),
'with_servers': parameter_types.single_param(
parameter_types.boolean)
},
'additionalProperties': False
}
show_query_schema_v253 = {
'type': 'object',
'properties': {
'with_servers': parameter_types.single_param(
parameter_types.boolean)
},
'additionalProperties': False
}
| 0
| 0
| 0
|
2501f6a15fea174f7089a921e75d9e41e9a71af7
| 6,098
|
py
|
Python
|
tests/graphs/synthetic/scenarios.py
|
pedrofreitascampospro/locintel
|
eb9c56cdc308660c31d90abe9fe62bd3634ba273
|
[
"MIT"
] | null | null | null |
tests/graphs/synthetic/scenarios.py
|
pedrofreitascampospro/locintel
|
eb9c56cdc308660c31d90abe9fe62bd3634ba273
|
[
"MIT"
] | null | null | null |
tests/graphs/synthetic/scenarios.py
|
pedrofreitascampospro/locintel
|
eb9c56cdc308660c31d90abe9fe62bd3634ba273
|
[
"MIT"
] | null | null | null |
"""
Scenarios for ODD generation/restriction of a base graph.
Steps to create scenarios:
1. Define your reference graph:
Add/choose graph fixture
-> This is your base/reference graph, analogous to our OSM base map
2. Define your final odd graph (expected output)
Restrict base graph using/adding `restriction` functions
-> This will create an ODD subgraph, which is the final result expected by the method under test
3. Generate synthetic scenarios
Use/add `transformation` functions
-> This will create synthetic inputs to the methods under test
4. Make sure these are fed to `generate_scenarios`
"""
from copy import deepcopy
import networkx as nx
from locintel.graphs.datamodel.jurbey import Node
from allpairspy import AllPairs
from tests.synthetic.graphs import (
urban_grid_no_geometry,
urban_grid_node_geometry,
urban_grid_node_and_edge_geometry,
)
from tests.synthetic.utils import (
interpolated_geometry,
create_edge,
requires,
find_midpoint,
)
########################################################
# ODD restrictions #
# #
# Restriction to apply to base graph, to generate #
# synthetic ODDs #
# #
########################################################
########################################################
# Graph transformations #
# #
# Transformations to apply to the ODD graph, in order #
# to emulate real world use cases on arbitrary #
# provider maps #
# #
########################################################
@requires("edge_geometry")
def generate_scenarios(
base_graphs, restrictions, transformations, combination_function=None
):
"""
Generates scenarios consisting of combinations of graphs, restrictions and transformations, according to logic
defined on combination_function. By default applies all pairs combinatorial method to keep it efficient, see more
info here: https://www.tutorialspoint.com/software_testing_dictionary/all_pairs_testing.htm
"""
combination_function = combination_function or AllPairs
for base_graph, restriction, transformation in combination_function(
[base_graphs, restrictions, transformations], filter_func=is_valid_combination
):
odd_graph = restriction(base_graph)
transformed_graph = transformation(odd_graph)
name = f"{base_graph.metadata['version']}_{restriction.__name__}_{transformation.__name__}"
yield GraphTestScenario(name, base_graph, odd_graph, transformed_graph)
odd_restrictions = [no_restrictions, remove_node, remove_edge]
graph_transformations = [no_transformations, change_node_ids, split_edges]
graphs = [
urban_grid_no_geometry,
urban_grid_node_geometry,
urban_grid_node_and_edge_geometry,
]
scenarios = generate_scenarios(graphs, odd_restrictions, graph_transformations)
| 34.258427
| 117
| 0.63529
|
"""
Scenarios for ODD generation/restriction of a base graph.
Steps to create scenarios:
1. Define your reference graph:
Add/choose graph fixture
-> This is your base/reference graph, analogous to our OSM base map
2. Define your final odd graph (expected output)
Restrict base graph using/adding `restriction` functions
-> This will create an ODD subgraph, which is the final result expected by the method under test
3. Generate synthetic scenarios
Use/add `transformation` functions
-> This will create synthetic inputs to the methods under test
4. Make sure these are fed to `generate_scenarios`
"""
from copy import deepcopy
import networkx as nx
from locintel.graphs.datamodel.jurbey import Node
from allpairspy import AllPairs
from tests.synthetic.graphs import (
urban_grid_no_geometry,
urban_grid_node_geometry,
urban_grid_node_and_edge_geometry,
)
from tests.synthetic.utils import (
interpolated_geometry,
create_edge,
requires,
find_midpoint,
)
########################################################
# ODD restrictions #
# #
# Restriction to apply to base graph, to generate #
# synthetic ODDs #
# #
########################################################
def no_restrictions(base_graph):
odd_graph = deepcopy(base_graph)
return odd_graph
def remove_node(base_graph):
odd_graph = deepcopy(base_graph)
odd_graph.remove_node(5)
return odd_graph
def remove_edge(base_graph):
odd_graph = deepcopy(base_graph)
odd_graph.remove_edge(13, 5)
return odd_graph
########################################################
# Graph transformations #
# #
# Transformations to apply to the ODD graph, in order #
# to emulate real world use cases on arbitrary #
# provider maps #
# #
########################################################
def no_transformations(odd_graph):
transformed_graph = deepcopy(odd_graph)
return transformed_graph
def change_node_ids(odd_graph):
# tests whether method is resilient to changing node IDs, while keeping the same "topology"
mapper = {node: "Prefix" + str(node) for node in odd_graph.nodes}
transformed_graph = nx.relabel_nodes(deepcopy(odd_graph), mapper)
return transformed_graph
@requires("edge_geometry")
def split_edges(odd_graph):
# tests whether method is resilient to arbitrary edge fragmentation
transformed_graph = deepcopy(odd_graph)
for edge in odd_graph.edges:
edge_data = odd_graph.get_edge_data(*edge)["data"]
start_node = edge[0]
end_node = edge[1]
midpoint = find_midpoint(edge_data.geometry)
new_node_id = transformed_graph.add_node(data=Node(coord=midpoint))
transformed_graph.add_edge(
start_node,
new_node_id,
data=create_edge(
geometry=interpolated_geometry(
transformed_graph.nodes[start_node]["data"].coord, midpoint
)
),
)
transformed_graph.add_edge(
new_node_id,
end_node,
data=create_edge(
geometry=interpolated_geometry(
midpoint, transformed_graph.nodes[end_node]["data"].coord
)
),
)
transformed_graph.remove_edge(*edge)
return transformed_graph
class GraphTestScenario(object):
def __init__(self, name, base_graph, expected_graph, input_graph):
"""
:param name: scenario name, as string
:param base_graph: base graph to restrict and transform (acts as OSM reference graph)
:param expected_graph: final graph correctly restricted to ODD
:param input_graph: graph with restriction information which serves as input to the method
"""
self.name = name
self.base_graph = base_graph
self.expected_graph = expected_graph
self.input_graph = input_graph
def __repr__(self):
return self.name
def is_valid_combination(combo):
if len(combo) == 3:
graph = combo[0]
transformation = combo[2]
transform_requirements = transformation.__annotations__.get("requirements", [])
if "edge_geometry" in transform_requirements:
if not all(
len(getattr(graph.get_edge_data(*edge)["data"], "geometry", [])) > 0
for edge in graph.edges()
):
return False
return True
def generate_scenarios(
base_graphs, restrictions, transformations, combination_function=None
):
"""
Generates scenarios consisting of combinations of graphs, restrictions and transformations, according to logic
defined on combination_function. By default applies all pairs combinatorial method to keep it efficient, see more
info here: https://www.tutorialspoint.com/software_testing_dictionary/all_pairs_testing.htm
"""
combination_function = combination_function or AllPairs
for base_graph, restriction, transformation in combination_function(
[base_graphs, restrictions, transformations], filter_func=is_valid_combination
):
odd_graph = restriction(base_graph)
transformed_graph = transformation(odd_graph)
name = f"{base_graph.metadata['version']}_{restriction.__name__}_{transformation.__name__}"
yield GraphTestScenario(name, base_graph, odd_graph, transformed_graph)
odd_restrictions = [no_restrictions, remove_node, remove_edge]
graph_transformations = [no_transformations, change_node_ids, split_edges]
graphs = [
urban_grid_no_geometry,
urban_grid_node_geometry,
urban_grid_node_and_edge_geometry,
]
scenarios = generate_scenarios(graphs, odd_restrictions, graph_transformations)
| 2,135
| 589
| 181
|
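generate_scenarios in the record above leans on allpairspy's AllPairs to keep the graph x restriction x transformation matrix small; a minimal sketch of the same pairwise idea on plain lists (the values below are placeholders):

from allpairspy import AllPairs

graphs = ["grid", "ring"]
restrictions = ["none", "remove_node"]
transformations = ["identity", "relabel", "split_edges"]

# Each yielded combination has one element per input list; together they
# cover every pair of values at least once without enumerating the full
# cartesian product.
for combo in AllPairs([graphs, restrictions, transformations]):
    print(combo)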
121b3fe06c1efda9c51df286b247a6fc0e9256d0
| 2,632
|
py
|
Python
|
sprd/vowifi/tmtc_ut/sdkut.py
|
deevarvar/myLab
|
7a5019f5f7fc11e173d350e6e2a7d2c80504782d
|
[
"MIT"
] | null | null | null |
sprd/vowifi/tmtc_ut/sdkut.py
|
deevarvar/myLab
|
7a5019f5f7fc11e173d350e6e2a7d2c80504782d
|
[
"MIT"
] | null | null | null |
sprd/vowifi/tmtc_ut/sdkut.py
|
deevarvar/myLab
|
7a5019f5f7fc11e173d350e6e2a7d2c80504782d
|
[
"MIT"
] | 3
|
2016-10-08T15:01:49.000Z
|
2018-05-24T03:14:24.000Z
|
#-*- coding=utf-8 -*-
#author: zhihua.ye@spreadtrum.com
import os
from tmtc_ut import *
from time import gmtime, strftime
from lib.report import *
from lib.htmlgenerator import *
from lib.jinjagenerator import *
if __name__ == '__main__':
sdk = Sdkut(casedir="./cases", bindir='./bin')
sdk.run()
sdk.dumpreport()
| 37.6
| 124
| 0.545593
|
#-*- coding=utf-8 -*-
#author: zhihua.ye@spreadtrum.com
import os
from tmtc_ut import *
from time import gmtime, strftime
from lib.report import *
from lib.htmlgenerator import *
from lib.jinjagenerator import *
class Sdkut(object):
def __init__(self, casedir='', bindir=''):
self.casedir = casedir
self.brickdir = os.path.realpath(casedir) + '/bricks'
self.bindir = bindir
self.utils = logutils()
self.timestamp = strftime("%Y_%m_%d_%H_%M_%S", gmtime())
self.outdir = './output/' + self.timestamp
self.reports = list()
self.logger = logConf()
def run(self):
for cdir in os.listdir(self.casedir):
if cdir != 'bricks':
confdir = self.casedir + '/' + cdir
onetmtc = TmtcUt(confdir=confdir, brickdir=self.brickdir,bindir=self.bindir, outdir=self.outdir)
try:
onetmtc.envsetup()
onetmtc.run()
except :
#try to catch exception, continue to execute.
etype = sys.exc_info()[0]
evalue = sys.exc_info()[1]
estr = str(etype) + ' ' + repr(evalue)
self.logger.logger.error("Unexpected error:" + estr)
self.reports.append(onetmtc.getreport())
def dumpreport(self):
fjson = list()
for index, report in enumerate(self.reports):
resultstr = "Passed" if report.getresult() else "Failed"
self.logger.logger.info('Case ' + report.getdesc() + " , run " + repr(report.getruntime()) + " s," + resultstr)
fjson.append(report.todict())
#https://www.w3cschool.cn/tryrun/showhtml/tryhtml_table_span
with open(self.outdir + '/report.json', 'w+') as f:
f.write(json.dumps(fjson, indent=4))
#old style html
"""
hg = htmlgenerator(data=fjson, outdir=self.outdir)
hg.addstyle()
hg.genSummary()
hg.genReportTable()
hg.dump()
"""
summary = convert(fjson)
temphtml = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"lib/templates",
"tmtc_report_sample.html"
)
jinja2 = JinjaGenerator(templatehtml=temphtml, data=summary)
html = jinja2.render()
with open(self.outdir + "/report.html", "w+") as file:
file.write(html)
if __name__ == '__main__':
sdk = Sdkut(casedir="./cases", bindir='./bin')
sdk.run()
sdk.dumpreport()
| 2,186
| -1
| 110
|
e6f542beb92d58a239b6422082d785c7575ae86a
| 1,136
|
py
|
Python
|
TrEEProfiler/rrg.py
|
maxdy/BioTools
|
0240ed203ef64e2cfd637a0fd508096151df7482
|
[
"MIT"
] | null | null | null |
TrEEProfiler/rrg.py
|
maxdy/BioTools
|
0240ed203ef64e2cfd637a0fd508096151df7482
|
[
"MIT"
] | null | null | null |
TrEEProfiler/rrg.py
|
maxdy/BioTools
|
0240ed203ef64e2cfd637a0fd508096151df7482
|
[
"MIT"
] | null | null | null |
import csv
import os
lnr_genomes_aa = [] # aa = assembly_accession
with open("bacteria_as_comref_genomes_wo_ecoli",'r') as f:
next(f) # skip headings
reader=csv.reader(f,delimiter='\t')
tmp_size = None
tmp_aa = None
tmp_s_id = None
for line in reader:
filepath = line[20].replace("/cygdrive/h/","H:/")
print filepath
statinfo = os.stat(filepath)
# Check whether there is a duplicate
if tmp_s_id != line[6]: # sid = species_id
tmp_aa = line[0]
lnr_genomes_aa.append(tmp_aa)
tmp_size = statinfo.st_size
else:
print ">",tmp_s_id
# Keep the largest genomes in a same clade
if tmp_size < statinfo.st_size:
tmp_size = statinfo.st_size
tmp_aa = line[0]
tmp_s_id = line[6]
lnr_genomes_aa.append(tmp_aa)
print lnr_genomes_aa
print "TOTAL:",len(lnr_genomes_aa)
with open("bacteria_as_comref_genomes_wo_ecoli",'r') as f:
lines = f.readlines()
# lnr = Largest Non-Redundant
nf = open("bacteria_as_comref_genomes_wo_ecoli_lnr","w")
for line in lines:
aa = line.split("\t")[0]
if aa in lnr_genomes_aa:
nf.write(line)
lnr_genomes_aa.remove(aa)
else:
print "NOT IN: ",aa
| 25.818182
| 59
| 0.698063
|
import csv
import os
lnr_genomes_aa = [] # aa = assembly_accession
with open("bacteria_as_comref_genomes_wo_ecoli",'r') as f:
next(f) # skip headings
reader=csv.reader(f,delimiter='\t')
tmp_size = None
tmp_aa = None
tmp_s_id = None
for line in reader:
filepath = line[20].replace("/cygdrive/h/","H:/")
print filepath
statinfo = os.stat(filepath)
# Check whether there is a duplicate
if tmp_s_id != line[6]: # sid = species_id
tmp_aa = line[0]
lnr_genomes_aa.append(tmp_aa)
tmp_size = statinfo.st_size
else:
print ">",tmp_s_id
# Keep the largest genomes in a same clade
if tmp_size < statinfo.st_size:
tmp_size = statinfo.st_size
tmp_aa = line[0]
tmp_s_id = line[6]
lnr_genomes_aa.append(tmp_aa)
print lnr_genomes_aa
print "TOTAL:",len(lnr_genomes_aa)
with open("bacteria_as_comref_genomes_wo_ecoli",'r') as f:
lines = f.readlines()
# lnr = Largest Non-Redundant
nf = open("bacteria_as_comref_genomes_wo_ecoli_lnr","w")
for line in lines:
aa = line.split("\t")[0]
if aa in lnr_genomes_aa:
nf.write(line)
lnr_genomes_aa.remove(aa)
else:
print "NOT IN: ",aa
| 0
| 0
| 0
|
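The loop in the record above keeps the largest assembly per species by comparing file sizes while streaming the rows; the same "largest per group" idea as a self-contained Python 3 sketch over made-up in-memory tuples:

# (species_id, assembly_accession, size_in_bytes) -- illustrative rows only
rows = [
    ("562", "GCA_0001", 4_600_000),
    ("562", "GCA_0002", 5_100_000),
    ("1280", "GCA_0003", 2_800_000),
]

largest = {}
for species_id, accession, size in rows:
    # keep only the biggest genome seen so far for each species
    if species_id not in largest or size > largest[species_id][1]:
        largest[species_id] = (accession, size)

print(sorted(acc for acc, _ in largest.values()))  # -> ['GCA_0002', 'GCA_0003']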
2857cbb68ef20cf62c1c0b75128c6e3e01a4ca7b
| 286
|
py
|
Python
|
Python/decorators/cache.py
|
Suraj-Rajesh/code
|
3d554c4d1d5cf4bd9d084b8034641c1f6c2a47c9
|
[
"MIT"
] | null | null | null |
Python/decorators/cache.py
|
Suraj-Rajesh/code
|
3d554c4d1d5cf4bd9d084b8034641c1f6c2a47c9
|
[
"MIT"
] | null | null | null |
Python/decorators/cache.py
|
Suraj-Rajesh/code
|
3d554c4d1d5cf4bd9d084b8034641c1f6c2a47c9
|
[
"MIT"
] | null | null | null |
from functools import wraps
@cache
fn(1)
fn(1)
fn(3)
| 13.619048
| 34
| 0.524476
|
from functools import wraps
def cache(func):
memo = {}
@wraps(func)
def wrapper(x):
print 'memo: ' + str(memo)
if x not in memo:
memo[x] = func(x)
return memo[x]
return wrapper
@cache
def fn(x):
return x + 1
fn(1)
fn(1)
fn(3)
| 185
| 0
| 45
|
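The hand-rolled cache decorator in the record above memoizes single-argument calls in a dict; for comparison, the standard library provides the same behaviour via functools.lru_cache (an equivalent sketch, not part of the original file):

from functools import lru_cache

@lru_cache(maxsize=None)  # unbounded memoization, like the dict-based version above
def fn(x):
    return x + 1

fn(1)   # computed
fn(1)   # served from the cache
fn(3)   # computed
print(fn.cache_info())  # CacheInfo(hits=1, misses=2, maxsize=None, currsize=2)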
839058cc75182dfc3372449c4c1ed12c0cb41c43
| 2,805
|
py
|
Python
|
crazyflie-lib-python/sys_test/swarm_test_rig/test_logging.py
|
manikamakam/swarm
|
3d3f4692f1969e0973fa8929660a8d0da53cafa7
|
[
"MIT"
] | null | null | null |
crazyflie-lib-python/sys_test/swarm_test_rig/test_logging.py
|
manikamakam/swarm
|
3d3f4692f1969e0973fa8929660a8d0da53cafa7
|
[
"MIT"
] | null | null | null |
crazyflie-lib-python/sys_test/swarm_test_rig/test_logging.py
|
manikamakam/swarm
|
3d3f4692f1969e0973fa8929660a8d0da53cafa7
|
[
"MIT"
] | 1
|
2019-12-02T01:00:18.000Z
|
2019-12-02T01:00:18.000Z
|
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2019 Bitcraze AB
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import unittest
import cflib.crtp
from cflib.crazyflie import Crazyflie
from cflib.crazyflie.log import LogConfig
from cflib.crazyflie.swarm import CachedCfFactory
from cflib.crazyflie.swarm import Swarm
from cflib.crazyflie.syncCrazyflie import SyncCrazyflie
from cflib.crazyflie.syncLogger import SyncLogger
from sys_test.swarm_test_rig.rig_support import RigSupport
| 37.905405
| 79
| 0.688057
|
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2019 Bitcraze AB
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import unittest
import cflib.crtp
from cflib.crazyflie import Crazyflie
from cflib.crazyflie.log import LogConfig
from cflib.crazyflie.swarm import CachedCfFactory
from cflib.crazyflie.swarm import Swarm
from cflib.crazyflie.syncCrazyflie import SyncCrazyflie
from cflib.crazyflie.syncLogger import SyncLogger
from sys_test.swarm_test_rig.rig_support import RigSupport
class TestLogging(unittest.TestCase):
def setUp(self):
cflib.crtp.init_drivers(enable_debug_driver=False)
self.test_rig_support = RigSupport()
def test_that_requested_logging_is_received_properly_from_one_cf(self):
# Fixture
uri = self.test_rig_support.all_uris[0]
self.test_rig_support.restart_devices([uri])
cf = Crazyflie(rw_cache='./cache')
# Test and Assert
with SyncCrazyflie(uri, cf=cf) as scf:
self.assert_add_logging_and_get_non_zero_value(scf)
def test_that_requested_logging_is_received_properly_from_all_cfs(self):
# Fixture
uris = self.test_rig_support.all_uris
self.test_rig_support.restart_devices(uris)
factory = CachedCfFactory(rw_cache='./cache')
# Test and Assert
with Swarm(uris, factory=factory) as swarm:
swarm.parallel_safe(self.assert_add_logging_and_get_non_zero_value)
def assert_add_logging_and_get_non_zero_value(self, scf):
log_name = 'stabilizer.roll'
expected = 0.0
lg_conf = LogConfig(name='SysTest', period_in_ms=10)
lg_conf.add_variable(log_name, 'float')
with SyncLogger(scf, lg_conf) as logger:
for log_entry in logger:
actual = log_entry[1][log_name]
break
self.assertNotAlmostEqual(expected, actual, places=4)
| 1,255
| 16
| 130
|
3b39c3840d44cbefc3a21780e9618b88653a856d
| 458
|
py
|
Python
|
frontend_progress_bar/new_uploads/upload.py
|
simonfong6/micro-projects
|
5be195ea72ce117df6da041446f11c18e102b5df
|
[
"MIT"
] | null | null | null |
frontend_progress_bar/new_uploads/upload.py
|
simonfong6/micro-projects
|
5be195ea72ce117df6da041446f11c18e102b5df
|
[
"MIT"
] | null | null | null |
frontend_progress_bar/new_uploads/upload.py
|
simonfong6/micro-projects
|
5be195ea72ce117df6da041446f11c18e102b5df
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/upload', methods=['GET', 'POST'])
@app.route('/upload/progress')
@app.route('/progress')
if __name__ == '__main__':
app.run(debug=True)
| 20.818182
| 62
| 0.639738
|
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/upload', methods=['GET', 'POST'])
def upload():
if request.method == 'POST' and 'media' in request.files:
return 100
return render_template('upload.html')
@app.route('/upload/progress')
def progress():
return 100
@app.route('/progress')
def short_progress():
return 100
if __name__ == '__main__':
app.run(debug=True)
| 146
| 0
| 69
|
fe9e6db54705ed4e0c750286b5324cb8fe1fc623
| 4,167
|
py
|
Python
|
GMOS_visualization.py
|
abostroem/gemini_pipeline
|
8ac0c6359f643b9dbc18dcbba34ba73d1750434f
|
[
"BSD-3-Clause"
] | null | null | null |
GMOS_visualization.py
|
abostroem/gemini_pipeline
|
8ac0c6359f643b9dbc18dcbba34ba73d1750434f
|
[
"BSD-3-Clause"
] | null | null | null |
GMOS_visualization.py
|
abostroem/gemini_pipeline
|
8ac0c6359f643b9dbc18dcbba34ba73d1750434f
|
[
"BSD-3-Clause"
] | null | null | null |
'''
Visualize steps of the calibration process to ensure everything went according to plan
'''
from matplotlib import pyplot as plt
from astropy.io import fits
from visualization import zscale #https://github.com/abostroem/utilities
overscan_size = 32 #pixels
unusable_bottom = 48//2 #pixels
| 32.811024
| 90
| 0.596592
|
'''
Visualize steps of the calibration process to ensure everything went according to plan
'''
from matplotlib import pyplot as plt
from astropy.io import fits
from visualization import zscale #https://github.com/abostroem/utilities
overscan_size = 32 #pixels
unusable_bottom = 48//2 #pixels
def visualize_bias(biasfile, out_filename):
fig, ax_list = plt.subplots(nrows=1, ncols=12, sharey=True, figsize=[10, 7])
ofile = fits.open(biasfile)
object_name = ofile[0].header['OBJECT']
fig.suptitle(object_name)
if len(ofile) > 13:
ofile = ofile[1::3]
else:
ofile = ofile[1:]
extnum = 1
for ax, ext in zip(ax_list, ofile):
img = ext.data
#remove overscan region
if extnum%2 == 0:
img = img[unusable_bottom//2:, overscan_size:]
else:
img = img[unusable_bottom:, :-overscan_size]
vmin, vmax = zscale(img)
ax.imshow(img, cmap='bone', vmin=vmin, vmax=vmax)
ax.set_xticks([])
ax.set_title('EXT {}'.format(extnum))
extnum+=1
plt.subplots_adjust(wspace=0)
plt.savefig(out_filename)
def visualize_flat(flatfile, out_filename):
fig, ax_list = plt.subplots(nrows=1, ncols=12, sharey=True, figsize=[10, 7])
ofile = fits.open(flatfile)
object_name = ofile[0].header['OBJECT']
fig.suptitle(object_name)
extnum = 1
for ax, ext in zip(ax_list, ofile[1:]):
img = ext.data
vmin, vmax = zscale(img)
ax.imshow(img, cmap='bone', vmin=vmin, vmax=vmax)
ax.set_xticks([])
ax.set_title('EXT {}'.format(extnum))
extnum += 1
plt.subplots_adjust(wspace=0)
plt.savefig(out_filename)
def visualize_science(sciencefile, out_filename, remove_overscan=False):
fig, ax_list = plt.subplots(nrows=1, ncols=12, sharey=True, figsize=[10, 7])
ofile = fits.open(sciencefile)
object_name = ofile[0].header['OBJECT']
fig.suptitle(object_name)
extnum=1
for ax, ext in zip(ax_list, ofile[1:]):
img = ext.data
#remove overscan region
if remove_overscan is True:
if extnum%2 == 0:
img = img[:, overscan_size:]
else:
img = img[:, :-overscan_size]
img = img[unusable_bottom:, :]
vmin, vmax = zscale(img)
ax.imshow(img, cmap='bone', vmin=vmin, vmax=vmax)
ax.set_xticks([])
ax.set_title('EXT {}'.format(extnum))
extnum += 1
plt.savefig(out_filename)
def comp_to_science(biasfile, flatfile, sciencefile, out_filename, remove_overscan=False):
fig, ax_list = plt.subplots(nrows=1, ncols=36, sharey=True, figsize=[25, 7])
#BIAS
ofile = fits.open(biasfile)
extnum = 1
for ax, ext in zip(ax_list[0::3], ofile[1::3]):
img = ext.data
#remove overscan region
if extnum%2 == 0:
img = img[unusable_bottom//2:, overscan_size:]
else:
img = img[unusable_bottom//2:, :-overscan_size]
vmin, vmax = zscale(img)
ax.imshow(img, cmap='bone', vmin=vmin, vmax=vmax)
ax.set_xticks([])
ax.set_title('BIA {}'.format(extnum))
extnum+=1
#FLAT
ofile = fits.open(flatfile)
extnum = 1
for ax, ext in zip(ax_list[1::3], ofile[1:]):
img = ext.data
vmin, vmax = zscale(img)
ax.imshow(img, cmap='bone', vmin=vmin, vmax=vmax)
ax.set_xticks([])
ax.set_title('FLT {}'.format(extnum))
extnum += 1
#Science
ofile = fits.open(sciencefile)
object_name = ofile[0].header['OBJECT']
fig.suptitle(object_name)
extnum=1
for ax, ext in zip(ax_list[2::3], ofile[1:]):
img = ext.data
#remove overscan region
if remove_overscan is True:
if extnum%2 == 0:
img = img[unusable_bottom:, overscan_size:]
else:
img = img[unusable_bottom:, :-overscan_size]
vmin, vmax = zscale(img)
ax.imshow(img, cmap='bone', vmin=vmin, vmax=vmax)
ax.set_xticks([])
ax.set_title('SCI {}'.format(extnum))
extnum += 1
plt.savefig(out_filename)
| 3,764
| 0
| 92
|
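The three visualize_* helpers in the record above share one pattern (open the FITS file, zscale each extension, tile the panels side by side); a hedged usage sketch, assuming hypothetical file names and that the visualization.zscale helper from the linked utilities repo is importable:

# Illustrative driver only -- the real bias/flat/science file names are not part of the record above.
import GMOS_visualization as gv

gv.visualize_bias('bias_stack.fits', 'bias_check.pdf')
gv.visualize_flat('flat_stack.fits', 'flat_check.pdf')
gv.visualize_science('science_frame.fits', 'science_check.pdf', remove_overscan=True)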
00ff4781240fd1c0a1d0349983f250d455bb5a45
| 1,181
|
py
|
Python
|
mnist/log.py
|
syitong/randfourier
|
e8aa9a7e0eed13d94a68b06a294060af40fa5557
|
[
"MIT"
] | 4
|
2018-11-29T19:15:32.000Z
|
2020-01-04T07:45:49.000Z
|
mnist/log.py
|
syitong/randfourier
|
e8aa9a7e0eed13d94a68b06a294060af40fa5557
|
[
"MIT"
] | 1
|
2019-04-26T17:33:03.000Z
|
2019-04-27T15:24:57.000Z
|
mnist/log.py
|
syitong/randfourier
|
e8aa9a7e0eed13d94a68b06a294060af40fa5557
|
[
"MIT"
] | null | null | null |
import time
class log:
"""
This module is used to track the progress of events
and write it into a log file.
"""
| 35.787879
| 81
| 0.521592
|
import time
class log:
"""
This module is used to track the progress of events
and write it into a log file.
"""
def __init__(self,filepath,init_message):
self.message = ''
self.filepath = filepath
self.progress = {'task':[init_message],'time':[time.process_time()]}
print(self.progress['task'][-1]
+ ': {:.4f}'.format(self.progress['time'][-1]))
def time_event(self,message):
self.progress['task'].append(message)
self.progress['time'].append(time.process_time())
print(self.progress['task'][-1]
+ ': {:.4f}'.format(self.progress['time'][-1]
- self.progress['time'][-2]))
def record(self,message):
self.message = message
def save(self):
progress = self.progress
with open(self.filepath,'w') as logfile:
for idx in range(1,len(progress['task'])):
logfile.write(progress['task'][idx]
+ ': {:.4f}\n'.format(progress['time'][idx]
- progress['time'][idx - 1]))
logfile.write(self.message)
| 944
| 0
| 107
|
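The log class in the record above timestamps named events with time.process_time() and writes the per-step durations on save(); a short usage sketch (the module import and file name are assumptions, not shown in the record):

from log import log  # assumes the file above is importable as log.py

tracker = log('run.log', 'start')        # prints 'start: <process time>'
_ = sum(i * i for i in range(10**6))     # stand-in for real CPU work
tracker.time_event('finished squares')   # prints the time spent since the previous event
tracker.record('all steps done\n')       # free-form message appended to the log file
tracker.save()                           # writes each step duration plus the message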
3edf5e6a9773f933621f46766289036db8eba0ff
| 246
|
gyp
|
Python
|
binding.gyp
|
SSTIA/fun-judge
|
010a5413642e1f28cf31ae35f2a605c0a836e859
|
[
"MIT"
] | null | null | null |
binding.gyp
|
SSTIA/fun-judge
|
010a5413642e1f28cf31ae35f2a605c0a836e859
|
[
"MIT"
] | null | null | null |
binding.gyp
|
SSTIA/fun-judge
|
010a5413642e1f28cf31ae35f2a605c0a836e859
|
[
"MIT"
] | 1
|
2018-12-06T06:59:56.000Z
|
2018-12-06T06:59:56.000Z
|
{
"targets": [
{
"target_name": "forbidden-point-finder",
"sources": [
"src/binding/finder.cpp",
"forbidden-point-finder/ForbiddenPointFinder.cpp"
]
}
]
}
| 22.363636
| 65
| 0.434959
|
{
"targets": [
{
"target_name": "forbidden-point-finder",
"sources": [
"src/binding/finder.cpp",
"forbidden-point-finder/ForbiddenPointFinder.cpp"
]
}
]
}
| 0
| 0
| 0
|
32726b1cb6b6247e15672ca2217538000b322b57
| 8,117
|
py
|
Python
|
model/sda.py
|
urielcaire/mult
|
45c0cba69153442be2cee6309d46d55086445e5c
|
[
"Apache-2.0"
] | 12
|
2020-10-13T01:27:35.000Z
|
2021-11-22T13:42:26.000Z
|
model/sda.py
|
urielcaire/mult
|
45c0cba69153442be2cee6309d46d55086445e5c
|
[
"Apache-2.0"
] | null | null | null |
model/sda.py
|
urielcaire/mult
|
45c0cba69153442be2cee6309d46d55086445e5c
|
[
"Apache-2.0"
] | 2
|
2020-10-12T13:40:41.000Z
|
2022-03-29T05:28:59.000Z
|
# Copyright 2020 The MuLT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from model import Model
import tensorflow as tf
| 35.757709
| 117
| 0.541456
|
# Copyright 2020 The MuLT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from model import Model
import tensorflow as tf
class StackedDenoisingAutoencoder(Model):
def __init__(self, model_name='sda', summary_directory='../output/'):
self.graph = tf.Graph()
self.model_name = model_name
with self.graph.as_default():
self.session = tf.Session(graph=self.graph)
self.summary_directory = summary_directory
self.optimizers = []
self.initial_weights, self.initial_biases = [], []
self.corrupted_inputs = []
self.weights, self.biases = [], []
self.encoders, self.decoders = [], []
self.tensorboard = []
self.add_summaries = self.summary_directory is not None
self.saver = None
def build(self,
n_input_features,
units_per_hidden_layer,
encoder_activation_function='sigmoid',
decoder_activation_function='identity'
):
with self.graph.as_default():
self.n_input_features = n_input_features
self.input = self.input = tf.placeholder(tf.float32, shape=(None, n_input_features), name='input')
self.units_per_hidden_layer = units_per_hidden_layer
self.encoder_activation_function = encoder_activation_function
self.decoder_activation_function = decoder_activation_function
self.keep_probability = tf.placeholder(tf.float32, name='keep_probability')
self.learning_rate = tf.placeholder(tf.float32, name='learning_rate')
with tf.name_scope('stack'):
n_inputs = self.n_input_features
current_input = self.input
for i, units in enumerate(self.units_per_hidden_layer):
mask = tf.random_uniform(shape=tf.shape(current_input), minval=0, maxval=1,
dtype=tf.float32, seed=None,
name='weight_initializer_stack_{}'.format(i + 1))
mask = tf.where(mask <= self.keep_probability,
tf.ones_like(current_input, dtype=tf.float32),
tf.zeros_like(current_input, dtype=tf.float32),
name='random_mask_stack_{}'.format(i + 1))
self.corrupted_inputs.append(tf.multiply(current_input, mask,
name='corruped_input_stack_{}'.format(i + 1)))
with tf.name_scope('encoder_stack_{}'.format(i + 1)):
previous_input = self.corrupted_inputs[-1]
activation_function = self.get_activation_function(self.encoder_activation_function)
weights = tf.Variable(tf.truncated_normal([n_inputs, units]), dtype=tf.float32,
name='weights_stack_{}'.format(i + 1))
self.initial_weights.append(weights)
bias = tf.Variable(tf.zeros([units], dtype=tf.float32),
name='bias_stack_{}'.format(i + 1))
self.initial_biases.append(bias)
self.encoders.append(activation_function(tf.add(tf.matmul(previous_input, weights), bias),
name='encoder_stack_{}'.format(i + 1)))
current_input = self.encoders[-1]
with tf.name_scope('decoder_stack_{}'.format(i + 1)):
weights = tf.Variable(tf.truncated_normal([units, n_inputs]), dtype=tf.float32,
name='weights_stack_{}'.format(i + 1))
bias = tf.Variable(tf.zeros([n_inputs], dtype=tf.float32),
name='bias_stack_{}'.format(i + 1))
activation_function = self.get_activation_function(self.decoder_activation_function)
self.decoders.append(activation_function(tf.add(tf.matmul(self.encoders[-1], weights), bias),
name='decoder_stack_{}'.format(i + 1)))
n_inputs = units
self.saver = tf.train.Saver()
def __build_optimizers(self, loss, optimizer):
with self.graph.as_default():
self.loss = loss
self.optimizer = optimizer
label = self.input
losses = []
with tf.name_scope('optimization'):
for i, decoder in enumerate(self.decoders):
lf = self.get_loss(self.loss)
losses.append(tf.reduce_mean(lf(label, decoder)))
optimizer = self.get_optimizer(self.optimizer)(learning_rate=self.learning_rate)
optimizer = optimizer.minimize(losses[-1])
self.optimizers.append(optimizer)
label = self.encoders[i]
with tf.name_scope('loss'):
for i, loss in enumerate(losses):
self.tensorboard.append(tf.summary.scalar('stack_{}'.format(i + 1), loss))
def fit(self, x, steps=1000, batch_size=None, learning_rate=1e-2, loss='mse', optimizer='sgd',
keep_probability=0.75):
with self.graph.as_default():
self.__build_optimizers(loss, optimizer)
self.batch_size = x.shape[0] if batch_size is None else batch_size
current_input = self.input
self.session.run(tf.global_variables_initializer())
self.session.run(tf.local_variables_initializer())
tensorboard_writer, tb = None, None
if self.add_summaries:
tensorboard_path = '{}/{}'.format( self.summary_directory, self.model_name)
tensorboard_writer = tf.summary.FileWriter(tensorboard_path,
tf.get_default_graph())
for i, optimizer in enumerate(self.optimizers):
for step in range(steps):
start, end = 0, min(self.batch_size, x.shape[0])
while start < x.shape[0]:
tb, opt = self.session.run([self.tensorboard[i], optimizer],
feed_dict={current_input: x,
self.learning_rate: learning_rate,
self.keep_probability: keep_probability})
start, end = end, min(x.shape[0], end + self.batch_size)
if self.add_summaries:
log_path = '{0}/{1}/{1}'.format(self.summary_directory, self.model_name)
self.saver.save(self.session, log_path, global_step=step + 1)
tensorboard_writer.add_summary(tb, step + 1)
def get_initial_weights(self):
result = []
for w in self.initial_weights:
result.append(self.session.run(w))
return result
def get_initial_biases(self):
result = []
for b in self.initial_biases:
result.append(self.session.run(b))
return result
| 7,179
| 20
| 185
|
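A hedged end-to-end sketch of the StackedDenoisingAutoencoder API in the record above (random toy data; TensorFlow 1.x graph mode and the repo's unseen Model base class are assumed; summary_directory=None skips TensorBoard output):

import numpy as np
from model.sda import StackedDenoisingAutoencoder  # assumes the package layout of the repo above

x = np.random.rand(256, 20).astype(np.float32)      # toy data: 256 samples, 20 features

sda = StackedDenoisingAutoencoder(model_name='toy_sda', summary_directory=None)
sda.build(n_input_features=20, units_per_hidden_layer=[16, 8])
sda.fit(x, steps=50, batch_size=64, learning_rate=1e-2, keep_probability=0.75)

# The greedily pre-trained weights can then seed a supervised network
weights = sda.get_initial_weights()
print([w.shape for w in weights])                   # -> [(20, 16), (16, 8)]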
1a1c18495688a5d56eefb780a5845b6038590c52
| 1,757
|
py
|
Python
|
maps/maps-csv.py
|
vanthaiunghoa/ourBlock
|
c106ddc030707c91af1e4ceff6b3a6086f727d8d
|
[
"MIT"
] | null | null | null |
maps/maps-csv.py
|
vanthaiunghoa/ourBlock
|
c106ddc030707c91af1e4ceff6b3a6086f727d8d
|
[
"MIT"
] | null | null | null |
maps/maps-csv.py
|
vanthaiunghoa/ourBlock
|
c106ddc030707c91af1e4ceff6b3a6086f727d8d
|
[
"MIT"
] | 1
|
2019-12-26T18:05:37.000Z
|
2019-12-26T18:05:37.000Z
|
from datetime import datetime
import json
from collections import OrderedDict
import os.path
from math import log
from math import e
from sklearn.preprocessing import minmax_scale
from sklearn.preprocessing import MinMaxScaler
import numpy as np
from math import sqrt
import pandas as pd
data = OrderedDict()
weights = []
lat = []
long = []
num_points = 500
if os.path.isfile('maps.json') :
with open ('maps.json', 'r+') as fp:
data = json.load(fp, object_pairs_hook=OrderedDict)
for key in range(len(list(data.keys()))):
if key > num_points:
break
stored = data[list(data.keys())[len(list(data.keys())) - 1 - key]]
month = stored['Month']
day = stored['Day']
year = stored['Year']
lat.append(stored['Latitude'])
long.append(stored['Longitude'])
date = month + " " + str(day) + ", " + str(year)
date_format = "%B %d, %Y"
now = datetime.now()
date_object = datetime.strptime(date, date_format)
delta = now - date_object
num_hours = delta.days*24
if num_hours != 0:
weights.append(sqrt(1.0/num_hours) * 1000)
else:
weights.append(25)
weights = np.array(weights)
weights = weights.reshape(-1, 1)
min_max_scaler = MinMaxScaler(feature_range=(0, 2))
weights = min_max_scaler.fit_transform(np.float32(weights))
weights = weights.tolist()
points = OrderedDict()
long_shit = []
lat_shit = []
weight_shit = []
for i in range(num_points):
long_shit.append(long[i])
lat_shit.append(lat[i])
weight_shit.append(weights[i][0])
df = pd.DataFrame()
df["lng"] = np.array(long_shit)
df['lat'] = np.array(lat_shit)
df ['weight'] = np.array(weight_shit)
df.to_csv('heat_map.csv', index=False)
| 20.916667
| 70
| 0.648264
|
from datetime import datetime
import json
from collections import OrderedDict
import os.path
from math import log
from math import e
from sklearn.preprocessing import minmax_scale
from sklearn.preprocessing import MinMaxScaler
import numpy as np
from math import sqrt
import pandas as pd
data = OrderedDict()
weights = []
lat = []
long = []
num_points = 500
if os.path.isfile('maps.json') :
with open ('maps.json', 'r+') as fp:
data = json.load(fp, object_pairs_hook=OrderedDict)
for key in range(len(list(data.keys()))):
if key > num_points:
break
stored = data[list(data.keys())[len(list(data.keys())) - 1 - key]]
month = stored['Month']
day = stored['Day']
year = stored['Year']
lat.append(stored['Latitude'])
long.append(stored['Longitude'])
date = month + " " + str(day) + ", " + str(year)
date_format = "%B %d, %Y"
now = datetime.now()
date_object = datetime.strptime(date, date_format)
delta = now - date_object
num_hours = delta.days*24
if num_hours != 0:
weights.append(sqrt(1.0/num_hours) * 1000)
else:
weights.append(25)
weights = np.array(weights)
weights = weights.reshape(-1, 1)
min_max_scaler = MinMaxScaler(feature_range=(0, 2))
weights = min_max_scaler.fit_transform(np.float32(weights))
weights = weights.tolist()
points = OrderedDict()
long_shit = []
lat_shit = []
weight_shit = []
for i in range(num_points):
long_shit.append(long[i])
lat_shit.append(lat[i])
weight_shit.append(weights[i][0])
df = pd.DataFrame()
df["lng"] = np.array(long_shit)
df['lat'] = np.array(lat_shit)
df ['weight'] = np.array(weight_shit)
df.to_csv('heat_map.csv', index=False)
| 0
| 0
| 0
|
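The weighting in the record above decays with the age of each report, w = 1000 * sqrt(1 / hours), before MinMaxScaler squeezes the values into [0, 2]; a few worked values (illustrative only) show the shape of that decay:

from math import sqrt

# raw recency weight used above, before min-max scaling
for hours in (24, 24 * 7, 24 * 30):
    print(hours, round(sqrt(1.0 / hours) * 1000, 1))
# 24    204.1   (one day old)
# 168    77.2   (one week old)
# 720    37.3   (one month old)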
06dcb38567cb606e5cd8337189d802aa2e73c189
| 21,003
|
py
|
Python
|
interpreter/code/byterun/pyvm2.py
|
yunkai123/my-500lines-notes
|
60fd3b18919b5bcb90ddece9e088c1c152438972
|
[
"MIT"
] | null | null | null |
interpreter/code/byterun/pyvm2.py
|
yunkai123/my-500lines-notes
|
60fd3b18919b5bcb90ddece9e088c1c152438972
|
[
"MIT"
] | null | null | null |
interpreter/code/byterun/pyvm2.py
|
yunkai123/my-500lines-notes
|
60fd3b18919b5bcb90ddece9e088c1c152438972
|
[
"MIT"
] | null | null | null |
"""一个纯 Python 实现的 Python 字节码解释器"""
# 改编自
# 1. pyvm2 作者:Paul Swartz,来自 http://www.twistedmatrix.com/users/z3p/
# 2. byterun 作者:Ned Batchelder,github.com/nedbat/byterun
import dis, operator, sys, collections, inspect, types
Block = collections.namedtuple("Block", "type, handler, stack_height")
class Function(object):
"""
Create a real function object, defining the things the interpreter expects.
"""
# '__doc__' is left out
__slots__ = [
'func_code', 'func_name', 'func_defaults', 'func_globals',
'func_locals', 'func_dict', 'func_closure',
'__name__', '__dict__',
'_vm', '_func',
]
def __init__(self, name, code, globs, defaults, closure, vm):
"""你不需要按照这个来理解解释器。"""
self._vm = vm
self.func_code = code
self.func_name = self.__name__ = name or code.co_name
self.func_defaults = tuple(defaults)
self.func_globals = globs
self.func_locals = self._vm.frame.local_names
self.__dict__ = {}
self.func_closure = closure
self.__doc__ = code.co_consts[0] if code.co_consts else None
# Sometimes we need a real Python function; this is it
kw = {
'argdefs': self.func_defaults,
}
if closure:
kw['closure'] = tuple(make_cell(0) for _ in closure)
# Build the function with FunctionType from the types module
self._func = types.FunctionType(code, globs, **kw)
def __call__(self, *args, **kwargs):
"""调用函数时,创建一个新帧并运行它。"""
# Python 3.6.1 update (bpo-19611):
# the implicit .0 argument produced by comprehension and generator-expression scopes becomes implicit0,
# which needs special handling (in byte_LOAD_FAST)
callargs = inspect.getcallargs(self._func, *args, **kwargs)
# Use callargs to provide a mapping of the arguments: pass it to the new frame
frame = self._vm.make_frame(
self.func_code, callargs, self.func_globals, {}
)
return self._vm.run_frame(frame)
| 29.918803
| 117
| 0.565491
|
"""一个纯 Python 实现的 Python 字节码解释器"""
# 改编自
# 1. pyvm2 作者:Paul Swartz,来自 http://www.twistedmatrix.com/users/z3p/
# 2. byterun 作者:Ned Batchelder,github.com/nedbat/byterun
import dis, operator, sys, collections, inspect, types
class Frame(object):
def __init__(self, code_obj, global_names, local_names, prev_frame):
self.code_obj = code_obj
self.global_names = global_names
self.local_names = local_names
self.prev_frame = prev_frame
self.stack = []
if prev_frame:
self.builtin_names = prev_frame.builtin_names
else:
self.builtin_names = local_names['__builtins__']
if hasattr(self.builtin_names, '__dict__'):
self.builtin_names = self.builtin_names.__dict__
self.last_instruction = 0
self.block_stack = []
    # Data stack manipulation
def top(self):
return self.stack[-1]
def pop(self):
return self.stack.pop()
def push(self, *vals):
self.stack.extend(vals)
def popn(self, n):
"""从值堆栈中弹出多个值。
返回一个 `n` 个值的列表,首先返回最深的值
"""
if n:
ret = self.stack[-n:]
self.stack[-n:] = []
return ret
else:
return []
    # Block stack manipulation
def push_block(self, b_type, handler=None):
stack_height = len(self.stack)
self.block_stack.append(Block(b_type, handler, stack_height))
def pop_block(self):
return self.block_stack.pop()
def unwind_block(self, block):
"""当给定的块完成时,展开数据栈上的值"""
if block.type == 'except-handler':
            # An exception on the stack consists of type, value, and traceback
offset = 3
else:
offset = 0
while len(self.stack) > block.stack_height + offset:
self.pop()
if block.type == 'except-handler':
traceback, value, exctype = self.popn(3)
return exctype, value, traceback
Block = collections.namedtuple("Block", "type, handler, stack_height")
class Function(object):
"""
创建一个真实的函数对象,定义解释器期望的东西。
"""
# 去掉 '__doc__'
__slots__ = [
'func_code', 'func_name', 'func_defaults', 'func_globals',
'func_locals', 'func_dict', 'func_closure',
'__name__', '__dict__',
'_vm', '_func',
]
def __init__(self, name, code, globs, defaults, closure, vm):
"""你不需要按照这个来理解解释器。"""
self._vm = vm
self.func_code = code
self.func_name = self.__name__ = name or code.co_name
self.func_defaults = tuple(defaults)
self.func_globals = globs
self.func_locals = self._vm.frame.local_names
self.__dict__ = {}
self.func_closure = closure
self.__doc__ = code.co_consts[0] if code.co_consts else None
        # Sometimes we need a real Python function. This is it.
kw = {
'argdefs': self.func_defaults,
}
if closure:
kw['closure'] = tuple(make_cell(0) for _ in closure)
# 利用 types 模块的 FunctionType 生成方法
self._func = types.FunctionType(code, globs, **kw)
def __call__(self, *args, **kwargs):
"""调用函数时,创建一个新帧并运行它。"""
# Python 3.6.1更新(bpo-19611):
# 作用域和生成器表达式作用域生成的隐式 .0 参数会变为 implicit0
# 处理的时候需要注意(在 byte_LOAD_FAST 中)
callargs = inspect.getcallargs(self._func, *args, **kwargs)
        # Use callargs to provide a mapping of arguments: pass them into the new frame
frame = self._vm.make_frame(
self.func_code, callargs, self.func_globals, {}
)
return self._vm.run_frame(frame)
def make_cell(value):
    # Create a real Python closure and grab one of its cells.
fn = (lambda x: lambda: x)(value)
return fn.__closure__[0]
class VirtualMachineError(Exception):
pass
class VirtualMachine(object):
def __init__(self):
        self.frames = []   # The call stack of frames
        self.frame = None  # The current frame
self.return_value = None
self.last_exception = None
    # Frame manipulation
def make_frame(self, code, callargs={}, global_names=None, local_names=None):
if global_names is not None and local_names is not None:
local_names = global_names
elif self.frames:
global_names = self.frame.global_names
local_names = {}
else:
global_names = local_names = {
'__builtins__': __builtins__,
'__name__': '__main__',
'__doc__': None,
'__package__': None
}
local_names.update(callargs)
frame = Frame(code, global_names, local_names, self.frame)
return frame
def push_frame(self, frame):
self.frames.append(frame)
self.frame = frame
def pop_frame(self):
self.frames.pop()
if self.frames:
self.frame = self.frames[-1]
else:
self.frame = None
    # Jumping through bytecode
def jump(self, jump):
"""将字节码指针移到 `jump`,以便下一步执行。"""
self.frame.last_instruction = jump
def run_code(self, code, global_names=None, local_names=None):
"""使用虚拟机执行代码的入口点。"""
frame = self.make_frame(code, global_names=global_names, local_names=local_names)
self.run_frame(frame)
# Check some invariants
# if self.frames:
# raise VirtualMachineError("Frames left over!")
# if self.frame and self.frame.stack:
# raise VirtualMachineError("Data left on stack! %r" % self.frame.stack)
# for testing, was val = self.run_frame(frame)
# return val # for testing
def parse_byte_and_args(self):
"""解析字节码中的指令和参数
Python 3.6以上的版本中,每条指令均占2字节,一个字节指令,一个字节参数
"""
f = self.frame
opoffset = f.last_instruction
byteCode = f.code_obj.co_code[opoffset]
f.last_instruction += 1
byte_name = dis.opname[byteCode]
        if byteCode >= dis.HAVE_ARGUMENT:
            # Index into the bytecode
            arg = f.code_obj.co_code[f.last_instruction]
            f.last_instruction += 1  # Advance the instruction pointer
            arg_val = int(arg)
            if byteCode in dis.hasconst:    # Look up a constant
                arg = f.code_obj.co_consts[arg_val]
            elif byteCode in dis.hasname:   # Look up a name
                arg = f.code_obj.co_names[arg_val]
            elif byteCode in dis.haslocal:  # Look up a local name
                arg = f.code_obj.co_varnames[arg_val]
            elif byteCode in dis.hasjrel:   # Calculate a relative jump
                arg = f.last_instruction + arg_val
            else:
                arg = arg_val
            argument = [arg]
        else:
            f.last_instruction += 1  # Advance even when there is no argument
            argument = []
return byte_name, argument
def dispatch(self, byte_name, argument):
"""按 bytename 分派到相应的方法。
在虚拟机上捕获并设置异常"""
# 稍后展开块堆栈时,
# 我们需要知道我们为什么要这么做。
why = None
try:
bytecode_fn = getattr(self, 'byte_%s' % byte_name, None)
if bytecode_fn is None:
if byte_name.startswith('UNARY_'):
self.unaryOperator(byte_name[6:])
elif byte_name.startswith('BINARY_'):
self.binaryOperator(byte_name[7:])
else:
raise VirtualMachineError(
"unsupported bytecode type: %s" % byte_name
)
else:
why = bytecode_fn(*argument)
except:
            # Deal with exceptions encountered while executing the operation.
self.last_exception = sys.exc_info()[:2] + (None,)
why = 'exception'
return why
def manage_block_stack(self, why):
block = self.frame.block_stack[-1]
if block.type == 'loop' and why == 'continue':
self.jump(self.return_value)
why = None
return why
self.frame.pop_block()
current_exc = self.frame.unwind_block(block)
if current_exc is not None:
self.last_exception = current_exc
if block.type == 'loop' and why == 'break':
self.jump(block.handler)
why = None
elif (block.type in ['setup-except', 'finally'] and why == 'exception'):
self.frame.push_block('except-handler')
exctype, value, tb = self.last_exception
self.frame.push(tb, value, exctype)
            self.frame.push(tb, value, exctype)  # twice
self.jump(block.handler)
why = None
elif block.type == 'finally':
if why in ('return', 'continue'):
self.frame.push(self.return_value)
self.frame.push(why)
self.jump(block.handler)
why = None
return why
def run_frame(self, frame):
"""运行一个帧,直到它(以某种方式)返回。
异常被抛出,或者返回值被返回
"""
self.push_frame(frame)
while True:
byte_name, argument = self.parse_byte_and_args()
why = self.dispatch(byte_name, argument)
            # Deal with any block management we need to do
while why and frame.block_stack:
why = self.manage_block_stack(why)
if why:
break
self.pop_frame()
if why == 'exception':
exc, val, tb = self.last_exception
e = exc(val)
e.__traceback__ = tb
raise e
return self.return_value
    ## Stack manipulation
def byte_LOAD_CONST(self, const):
self.frame.push(const)
def byte_POP_TOP(self):
self.frame.pop()
def byte_DUP_TOP(self):
self.frame.push(self.frame.top())
    ## Names
def byte_LOAD_NAME(self, name):
frame = self.frame
if name in frame.local_names:
val = frame.local_names[name]
elif name in frame.global_names:
val = frame.global_names[name]
elif name in frame.builtin_names:
val = frame.builtin_names[name]
else:
raise NameError("name '%s' is not defined" % name)
self.frame.push(val)
def byte_STORE_NAME(self, name):
self.frame.local_names[name] = self.frame.pop()
def byte_DELETE_NAME(self, name):
del self.frame.local_names[name]
def byte_LOAD_FAST(self, name):
        # Special handling for the implicit .0 argument generated by comprehension and generator-expression scopes
if name == '.0':
name = 'implicit0'
if name in self.frame.local_names:
val = self.frame.local_names[name]
else:
raise UnboundLocalError(
"local variable '%s' referenced before assignment" % name
)
self.frame.push(val)
def byte_STORE_FAST(self, name):
self.frame.local_names[name] = self.frame.pop()
def byte_LOAD_GLOBAL(self, name):
f = self.frame
if name in f.global_names:
val = f.global_names[name]
elif name in f.builtin_names:
val = f.builtin_names[name]
else:
            # The error message no longer contains the word "global"
# raise NameError("global name '%s' is not defined" % name)
raise NameError("name '%s' is not defined" % name)
f.push(val)
    ## Operators
UNARY_OPERATORS = {
'POSITIVE': operator.pos,
'NEGATIVE': operator.neg,
'NOT': operator.not_,
'INVERT': operator.invert,
}
def unaryOperator(self, op):
x = self.frame.pop()
self.frame.push(self.UNARY_OPERATORS[op](x))
BINARY_OPERATORS = {
'POWER': pow,
'MULTIPLY': operator.mul,
'FLOOR_DIVIDE': operator.floordiv,
'TRUE_DIVIDE': operator.truediv,
'MODULO': operator.mod,
'ADD': operator.add,
'SUBTRACT': operator.sub,
'SUBSCR': operator.getitem,
'LSHIFT': operator.lshift,
'RSHIFT': operator.rshift,
'AND': operator.and_,
'XOR': operator.xor,
'OR': operator.or_,
}
def binaryOperator(self, op):
x, y = self.frame.popn(2)
self.frame.push(self.BINARY_OPERATORS[op](x, y))
COMPARE_OPERATORS = [
operator.lt,
operator.le,
operator.eq,
operator.ne,
operator.gt,
operator.ge,
lambda x, y: x in y,
lambda x, y: x not in y,
lambda x, y: x is y,
lambda x, y: x is not y,
lambda x, y: issubclass(x, Exception) and issubclass(x, y),
]
def byte_COMPARE_OP(self, opnum):
x, y = self.frame.popn(2)
self.frame.push(self.COMPARE_OPERATORS[opnum](x, y))
    ## Attributes and indexing
def byte_LOAD_ATTR(self, attr):
obj = self.frame.pop()
val = getattr(obj, attr)
self.frame.push(val)
def byte_STORE_ATTR(self, name):
val, obj = self.frame.popn(2)
setattr(obj, name, val)
def byte_STORE_SUBSCR(self):
val, obj, subscr = self.frame.popn(3)
obj[subscr] = val
    ## Building
def byte_BUILD_TUPLE(self, count):
elts = self.frame.popn(count)
e = tuple(elts)
self.frame.push(e)
def byte_BUILD_LIST(self, count):
elts = self.frame.popn(count)
self.frame.push(elts)
def byte_BUILD_MAP(self, size):
self.frame.push({})
    # New in Python 3.6
def byte_BUILD_CONST_KEY_MAP(self, size):
"""
The version of BUILD_MAP specialized for constant keys.
Pops the top element on the stack which contains a tuple
of keys, then starting from TOS1, pops count values to
form values in the built dictionary.
"""
the_map = {}
keys = self.frame.pop()
vals = self.frame.popn(size)
for i in range(size):
the_map[keys[i]] = vals[i]
self.frame.push(the_map)
def byte_STORE_MAP(self):
the_map, val, key = self.frame.popn(3)
the_map[key] = val
self.frame.push(the_map)
def byte_UNPACK_SEQUENCE(self, count):
seq = self.frame.pop()
for x in reversed(seq):
self.frame.push(x)
def byte_BUILD_SLICE(self, count):
if count == 2:
x, y = self.frame.popn(2)
self.frame.push(slice(x, y))
elif count == 3:
x, y, z = self.frame.popn(3)
self.frame.push(slice(x, y, z))
else: # pragma: no cover
raise VirtualMachineError("Strange BUILD_SLICE count: %r" % count)
def byte_LIST_APPEND(self, count):
val = self.frame.pop()
the_list = self.frame.stack[-count] # peek
the_list.append(val)
    ## Jumps
def byte_JUMP_FORWARD(self, jump):
self.jump(jump)
def byte_JUMP_ABSOLUTE(self, jump):
self.jump(jump)
def byte_POP_JUMP_IF_TRUE(self, jump):
val = self.frame.pop()
if val:
self.jump(jump)
def byte_POP_JUMP_IF_FALSE(self, jump):
val = self.frame.pop()
if not val:
self.jump(jump)
def byte_JUMP_IF_TRUE_OR_POP(self, jump):
val = self.frame.top()
if val:
self.jump(jump)
else:
self.frame.pop()
def byte_JUMP_IF_FALSE_OR_POP(self, jump):
val = self.frame.top()
if not val:
self.jump(jump)
else:
self.frame.pop()
    ## Blocks
def byte_SETUP_LOOP(self, dest):
self.frame.push_block('loop', dest)
def byte_GET_ITER(self):
self.frame.push(iter(self.frame.pop()))
def byte_FOR_ITER(self, jump):
iterobj = self.frame.top()
try:
v = next(iterobj)
self.frame.push(v)
except StopIteration:
self.frame.pop()
self.jump(jump)
def byte_BREAK_LOOP(self):
return 'break'
def byte_CONTINUE_LOOP(self, dest):
        # This is a trick with the return value: when unwinding blocks,
        # continue and return both have to preserve state while the finally
        # block executes. For continue it is the jump target, for return it is
        # the value to return. It is pushed on the stack, so continue puts its
        # jump destination into return_value.
self.return_value = dest
return 'continue'
def byte_SETUP_EXCEPT(self, dest):
self.frame.push_block('setup-except', dest)
def byte_SETUP_FINALLY(self, dest):
self.frame.push_block('finally', dest)
def byte_BEGIN_FINALLY(self, const):
self.byte_LOAD_CONST(const)
def byte_END_FINALLY(self):
        v = self.frame.pop()
if isinstance(v, str):
why = v
if why in ('return', 'continue'):
                self.return_value = self.frame.pop()
if why == 'silenced': # PY3
                block = self.frame.pop_block()
                assert block.type == 'except-handler'
                self.frame.unwind_block(block)
why = None
elif v is None:
why = None
elif issubclass(v, BaseException):
exctype = v
            val = self.frame.pop()
            tb = self.frame.pop()
self.last_exception = (exctype, val, tb)
why = 'reraise'
else: # pragma: no cover
raise VirtualMachineError("Confused END_FINALLY")
return why
def byte_POP_BLOCK(self):
self.frame.pop_block()
def byte_RAISE_VARARGS(self, argc):
cause = exc = None
if argc == 2:
cause = self.frame.pop()
exc = self.frame.pop()
elif argc == 1:
exc = self.frame.pop()
return self.do_raise(exc, cause)
def do_raise(self, exc, cause):
        if exc is None:          # reraise
            exc_type, val, tb = self.last_exception
        elif type(exc) == type:  # like `raise ValueError`
            exc_type = exc
            val = exc()          # make an instance
        elif isinstance(exc, BaseException):
            # like `raise ValueError('foo')`
            exc_type = type(exc)
            val = exc
        else:
            return 'exception'   # failure
self.last_exception = exc_type, val, val.__traceback__
return 'exception'
def byte_POP_EXCEPT(self):
block = self.frame.pop_block()
if block.type != 'except-handler':
raise Exception("popped block is not an except handler")
current_exc = self.frame.unwind_block(block)
if current_exc is not None:
self.last_exception = current_exc
    ## Functions and methods
def byte_LOAD_METHOD(self, arg):
self.byte_LOAD_ATTR(arg)
def byte_CALL_METHOD(self, arg):
self.byte_CALL_FUNCTION(arg)
def byte_MAKE_FUNCTION(self, argc):
        # This opcode changed in 3.6
        # argc holds the flags:
# 0x01 a tuple of default values for positional-only and positional-or-keyword parameters in positional order
# 0x02 a dictionary of keyword-only parameters’ default values
# 0x04 an annotation dictionary
# 0x08 a tuple containing cells for free variables, making a closure
name = self.frame.pop()
code = self.frame.pop()
        # This part could still be improved
defaults = self.frame.pop() if (argc & 1) == 1 else []
globs = self.frame.global_names
#TODO: if we're not supporting kwargs, do we need the defaults?
fn = Function(name, code, globs, defaults, None, self)
self.frame.push(fn)
def byte_CALL_FUNCTION(self, arg):
        lenKw, lenPos = divmod(arg, 256)  # high byte is the kwarg count; kwargs not supported in byterun
        # Only valid for calls that use positional arguments only
posargs = self.frame.popn(lenPos)
func = self.frame.pop()
retval = func(*posargs)
self.frame.push(retval)
def byte_RETURN_VALUE(self):
self.return_value = self.frame.pop()
return "return"
    ## Imports
def byte_IMPORT_NAME(self, name):
level, fromlist = self.frame.popn(2)
frame = self.frame
self.frame.push(__import__(name, frame.global_names,
frame.local_names, fromlist, level))
def byte_IMPORT_FROM(self, name):
mod = self.frame.top()
self.frame.push(getattr(mod, name))
    ## And the rest...
def byte_STORE_LOCALS(self):
self.frame.local_names = self.frame.pop()
def byte_LOAD_BUILD_CLASS(self):
self.frame.push(self.build_class)
def build_class(self, func, name, *bases, **kwds):
if not isinstance(func, Function):
raise TypeError("func must be a Function")
if not isinstance(name, str):
raise TypeError("name must be string")
metaclass = kwds.pop('metaclass', None)
if metaclass is None:
metaclass = type(bases[0]) if bases else type
if isinstance(metaclass, type):
metaclass = self.calculate_metaclass(metaclass, bases)
void = object()
prepare = getattr(metaclass, '__prepare__', void)
namespace = {} if prepare is void else prepare(name, bases, **kwds)
frame = Frame(func.func_code, func.func_globals, namespace, self.frame)
self.run_frame(frame)
cls = metaclass(name, bases, namespace)
return cls
def calculate_metaclass(self, metaclass, bases):
winner = metaclass
for base in bases:
t = type(base)
if issubclass(t, winner):
winner = t
elif not issubclass(winner, t):
raise TypeError("metaclass conflict", winner, t)
return winner
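A minimal sketch of how this interpreter is driven end to end (assuming the module above is importable as pyvm2; it targets Python 3.6-style wordcode, which is what parse_byte_and_args expects):

import pyvm2  # the module above, assumed importable under this name

source = """
def add(a, b):
    return a + b

print(add(3, 4))
"""

code = compile(source, "<test>", "exec")
vm = pyvm2.VirtualMachine()
vm.run_code(code)  # executes the bytecode instruction by instruction and prints 7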
| 11,969
| 8,096
| 92
|
a825d0701ed6a068133819b7f8a0e257d16df555
| 31,029
|
py
|
Python
|
src/spdc_inv/utils/utils.py
|
EyalRozenberg1/SPDCinv
|
036910a94d850ade312d593b1c18c3896093b049
|
[
"Apache-2.0"
] | 4
|
2021-12-11T21:05:35.000Z
|
2022-01-05T02:52:15.000Z
|
src/spdc_inv/utils/utils.py
|
EyalRozenberg1/SPDCinv
|
036910a94d850ade312d593b1c18c3896093b049
|
[
"Apache-2.0"
] | null | null | null |
src/spdc_inv/utils/utils.py
|
EyalRozenberg1/SPDCinv
|
036910a94d850ade312d593b1c18c3896093b049
|
[
"Apache-2.0"
] | 1
|
2022-01-10T17:09:53.000Z
|
2022-01-10T17:09:53.000Z
|
from abc import ABC
from jax.ops import index_update, index_add, index
from typing import List, Union, Any
from spdc_inv.utils.defaults import QUBIT
import scipy.special as sp
import jax.numpy as np
import math
# Constants:
pi = np.pi
c = 2.99792458e8 # speed of light [meter/sec]
eps0 = 8.854187817e-12 # vacuum permittivity [Farad/meter]
h_bar = 1.054571800e-34 # [m^2 kg / s], taken from http://physics.nist.gov/cgi-bin/cuu/Value?hbar|search_for=planck
# lambda functions:
G1_Normalization = lambda w: h_bar * w / (2 * eps0 * c)
I = lambda A, n: 2 * n * eps0 * c * np.abs(A) ** 2 # Intensity
Power2D = lambda A, n, dx, dy: np.sum(I(A, n)) * dx * dy
# Compute the idler wavelength given pump and signal
SFG_idler_wavelength = lambda lambda_p, lambda_s: lambda_p * lambda_s / (lambda_s - lambda_p)
def PP_crystal_slab(
delta_k,
z,
crystal_profile,
inference=None
):
"""
Periodically poled crystal slab.
create the crystal slab at point z in the crystal, for poling period 2pi/delta_k
Parameters
----------
delta_k: k mismatch
z: longitudinal point for generating poling pattern
crystal_profile: Crystal 3D hologram (if None, ignore)
inference: (True/False) if in inference mode, we include more coefficients in the poling
description for better validation
Returns Periodically poled crystal slab at point z
-------
"""
if crystal_profile is None:
return np.sign(np.cos(np.abs(delta_k) * z))
else:
magnitude = np.abs(crystal_profile)
phase = np.angle(crystal_profile)
if inference is not None:
max_order_fourier = 20
poling = 0
magnitude = magnitude / magnitude.max()
DutyCycle = np.arcsin(magnitude) / np.pi
for m in range(max_order_fourier):
if m == 0:
poling = poling + 2 * DutyCycle - 1
else:
poling = poling + (2 / (m * np.pi)) * \
np.sin(m * pi * DutyCycle) * 2 * np.cos(m * phase + m * np.abs(delta_k) * z)
return poling
else:
return (2 / np.pi) * np.exp(1j * (np.abs(delta_k) * z)) * magnitude * np.exp(1j * phase)
def HermiteBank(
lam,
refractive_index,
W0,
max_mode_x,
max_mode_y,
x,
y,
z=0
):
"""
generates a dictionary of Hermite Gauss basis functions
Parameters
----------
    lam: wavelength
refractive_index: refractive index
W0: beam waist
max_mode_x: maximum projection mode 1st axis
max_mode_y: maximum projection mode 2nd axis
x: transverse points, x axis
y: transverse points, y axis
z: projection longitudinal position
Returns
-------
dictionary of Hermite Gauss basis functions
"""
Hermite_dict = {}
for nx in range(max_mode_x):
for ny in range(max_mode_y):
Hermite_dict[f'|HG{nx}{ny}>'] = Hermite_gauss(lam, refractive_index, W0, nx, ny, z, x, y)
return np.array(list(Hermite_dict.values())), [*Hermite_dict]
def LaguerreBank(
lam,
refractive_index,
W0,
max_mode_p,
max_mode_l,
x,
y,
z=0,
get_dict: bool = False,
):
"""
generates a dictionary of Laguerre Gauss basis functions
Parameters
----------
    lam: wavelength
refractive_index: refractive index
W0: beam waist
max_mode_p: maximum projection mode 1st axis
max_mode_l: maximum projection mode 2nd axis
x: transverse points, x axis
y: transverse points, y axis
z: projection longitudinal position
    get_dict: (True/False) if True, the function will return a dictionary;
              otherwise the dictionary is split into an np.array of the basis functions and a list of the dictionary keys.
Returns
-------
dictionary of Laguerre Gauss basis functions
"""
Laguerre_dict = {}
for p in range(max_mode_p):
for l in range(-max_mode_l, max_mode_l + 1):
Laguerre_dict[f'|LG{p}{l}>'] = Laguerre_gauss(lam, refractive_index, W0, l, p, z, x, y)
if get_dict:
return Laguerre_dict
return np.array(list(Laguerre_dict.values())), [*Laguerre_dict]
def TomographyBankLG(
lam,
refractive_index,
W0,
max_mode_p,
max_mode_l,
x,
y,
z=0,
relative_phase: List[Union[Union[int, float], Any]] = None,
tomography_quantum_state: str = None,
):
"""
    generates a dictionary of basis functions with projections into two orthogonal LG bases and mutually unbiased
bases (MUBs). The MUBs are constructed from superpositions of the two orthogonal LG bases.
according to: https://doi.org/10.1364/AOP.11.000067
Parameters
----------
    lam: wavelength
refractive_index: refractive index
W0: beam waist
max_mode_p: maximum projection mode 1st axis
max_mode_l: maximum projection mode 2nd axis
x: transverse points, x axis
y: transverse points, y axis
z: projection longitudinal position
relative_phase: The relative phase between the mutually unbiased bases (MUBs) states
    tomography_quantum_state: the current quantum state for which we calculate the tomography matrix.
currently we support: qubit/qutrit
Returns
-------
dictionary of bases functions used for constructing the tomography matrix
"""
TOMO_dict = \
LaguerreBank(
lam,
refractive_index,
W0,
max_mode_p,
max_mode_l,
x, y, z,
get_dict=True)
if tomography_quantum_state is QUBIT:
del TOMO_dict['|LG00>']
LG_modes, LG_string = np.array(list(TOMO_dict.values())), [*TOMO_dict]
for m in range(len(TOMO_dict) - 1, -1, -1):
for n in range(m - 1, -1, -1):
for k in range(len(relative_phase)):
TOMO_dict[f'{LG_string[m]}+e^j{str(relative_phase[k]/np.pi)}π{LG_string[n]}'] = \
(1 / np.sqrt(2)) * (LG_modes[m] + np.exp(1j * relative_phase[k]) * LG_modes[n])
return np.array(list(TOMO_dict.values())), [*TOMO_dict]
def TomographyBankHG(
lam,
refractive_index,
W0,
max_mode_x,
max_mode_y,
x,
y,
z=0,
relative_phase: List[Union[Union[int, float], Any]] = None,
tomography_quantum_state: str = None,
):
"""
    generates a dictionary of basis functions with projections into two orthogonal HG bases and mutually unbiased
bases (MUBs). The MUBs are constructed from superpositions of the two orthogonal HG bases.
according to: https://doi.org/10.1364/AOP.11.000067
Parameters
----------
    lam: wavelength
refractive_index: refractive index
W0: beam waist
max_mode_x: maximum projection mode 1st axis
max_mode_y: maximum projection mode 2nd axis
x: transverse points, x axis
y: transverse points, y axis
z: projection longitudinal position
relative_phase: The relative phase between the mutually unbiased bases (MUBs) states
    tomography_quantum_state: the current quantum state for which we calculate the tomography matrix.
currently we support: qubit
Returns
-------
dictionary of bases functions used for constructing the tomography matrix
"""
TOMO_dict = \
HermiteBank(
lam,
refractive_index,
W0,
max_mode_x,
max_mode_y,
x, y, z,
get_dict=True)
if tomography_quantum_state is QUBIT:
del TOMO_dict['|HG00>']
del TOMO_dict['|HG11>']
HG_modes, HG_string = np.array(list(TOMO_dict.values())), [*TOMO_dict]
for m in range(len(TOMO_dict) - 1, -1, -1):
for n in range(m - 1, -1, -1):
for k in range(len(relative_phase)):
TOMO_dict[f'{HG_string[m]}+e^j{str(relative_phase[k]/np.pi)}π{HG_string[n]}'] = \
(1 / np.sqrt(2)) * (HG_modes[m] + np.exp(1j * relative_phase[k]) * HG_modes[n])
return np.array(list(TOMO_dict.values())), [*TOMO_dict]
def Hermite_gauss(lam, refractive_index, W0, nx, ny, z, X, Y, coef=None):
"""
Hermite Gauss in 2D
Parameters
----------
lam: wavelength
refractive_index: refractive index
W0: beam waists
    nx, ny: order of the HG beam
z: the place in z to calculate for
x,y: matrices of x and y
coef
Returns
-------
Hermite-Gaussian beam of order n,m in 2D
"""
k = 2 * np.pi * refractive_index / lam
z0 = np.pi * W0 ** 2 * refractive_index / lam # Rayleigh range
Wz = W0 * np.sqrt(1 + (z / z0) ** 2) # w(z), the variation of the spot size
invR = z / ((z ** 2) + (z0 ** 2)) # radius of curvature
gouy = (nx + ny + 1)*np.arctan(z/z0)
if coef is None:
coefx = np.sqrt(np.sqrt(2/pi) / (2**nx * math.factorial(nx)))
coefy = np.sqrt(np.sqrt(2/pi) / (2**ny * math.factorial(ny)))
coef = coefx * coefy
U = coef * \
(W0/Wz) * np.exp(-(X**2 + Y**2) / Wz**2) * \
HermiteP(nx, np.sqrt(2) * X / Wz) * \
HermiteP(ny, np.sqrt(2) * Y / Wz) * \
np.exp(-1j * (k * (X**2 + Y**2) / 2) * invR) * \
np.exp(1j * gouy)
return U
def Laguerre_gauss(lam, refractive_index, W0, l, p, z, x, y, coef=None):
"""
Laguerre Gauss in 2D
Parameters
----------
lam: wavelength
refractive_index: refractive index
W0: beam waists
l, p: order of the LG beam
z: the place in z to calculate for
x,y: matrices of x and y
coef
Returns
-------
Laguerre-Gaussian beam of order l,p in 2D
"""
k = 2 * np.pi * refractive_index / lam
z0 = np.pi * W0 ** 2 * refractive_index / lam # Rayleigh range
Wz = W0 * np.sqrt(1 + (z / z0) ** 2) # w(z), the variation of the spot size
r = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
invR = z / ((z ** 2) + (z0 ** 2)) # radius of curvature
gouy = (np.abs(l)+2*p+1)*np.arctan(z/z0)
if coef is None:
coef = np.sqrt(2*math.factorial(p)/(np.pi * math.factorial(p + np.abs(l))))
U = coef * \
(W0/Wz)*(r*np.sqrt(2)/Wz)**(np.abs(l)) * \
np.exp(-r**2 / Wz**2) * \
LaguerreP(p, l, 2 * r**2 / Wz**2) * \
np.exp(-1j * (k * r**2 / 2) * invR) * \
np.exp(-1j * l * phi) * \
np.exp(1j * gouy)
return U
def HermiteP(n, x):
"""
Hermite polynomial of rank n Hn(x)
Parameters
----------
    n: order of the Hermite polynomial
x: matrix of x
Returns
-------
Hermite polynomial
"""
if n == 0:
return 1
elif n == 1:
return 2 * x
else:
return 2 * x * HermiteP(n - 1, x) - 2 * (n - 1) * HermiteP(n - 2, x)
def LaguerreP(p, l, x):
"""
Generalized Laguerre polynomial of rank p,l L_p^|l|(x)
Parameters
----------
l, p: order of the LG beam
x: matrix of x
Returns
-------
Generalized Laguerre polynomial
"""
if p == 0:
return 1
elif p == 1:
return 1 + np.abs(l)-x
else:
return ((2*p-1+np.abs(l)-x)*LaguerreP(p-1, l, x) - (p-1+np.abs(l))*LaguerreP(p-2, l, x))/p
class Beam(ABC):
"""
A class that holds everything to do with a beam
"""
def __init__(self,
lam: float,
ctype,
polarization: str,
T: float,
power: float = 0):
"""
Parameters
----------
lam: beam's wavelength
        ctype: function that holds the crystal type, for calculating the refractive index
polarization: Polarization of the beam
T: crystal's temperature [Celsius Degrees]
power: beam power [watt]
"""
self.lam = lam
self.n = ctype(lam * 1e6, T, polarization) # refractive index
self.w = 2 * np.pi * c / lam # frequency
self.k = 2 * np.pi * ctype(lam * 1e6, T, polarization) / lam # wave vector
self.power = power # beam power
def fix_power(
A,
power,
n,
dx,
dy
):
"""
    The function takes a field A and normalizes it to have the indicated power
Parameters
----------
A
power
n
dx
dy
Returns
-------
"""
output = A * np.sqrt(power) / np.sqrt(Power2D(A, n, dx, dy))
return output
class DensMat(ABC):
"""
A class that holds tomography dimensions and
tensors used for calculating qubit and qutrit tomography
"""
| 34.476667
| 118
| 0.530053
|
from abc import ABC
from jax.ops import index_update, index_add, index
from typing import List, Union, Any
from spdc_inv.utils.defaults import QUBIT
import scipy.special as sp
import jax.numpy as np
import math
# Constants:
pi = np.pi
c = 2.99792458e8 # speed of light [meter/sec]
eps0 = 8.854187817e-12 # vacuum permittivity [Farad/meter]
h_bar = 1.054571800e-34 # [m^2 kg / s], taken from http://physics.nist.gov/cgi-bin/cuu/Value?hbar|search_for=planck
# lambda functions:
G1_Normalization = lambda w: h_bar * w / (2 * eps0 * c)
I = lambda A, n: 2 * n * eps0 * c * np.abs(A) ** 2 # Intensity
Power2D = lambda A, n, dx, dy: np.sum(I(A, n)) * dx * dy
# Compute the idler wavelength given pump and signal
SFG_idler_wavelength = lambda lambda_p, lambda_s: lambda_p * lambda_s / (lambda_s - lambda_p)
def PP_crystal_slab(
delta_k,
z,
crystal_profile,
inference=None
):
"""
Periodically poled crystal slab.
create the crystal slab at point z in the crystal, for poling period 2pi/delta_k
Parameters
----------
delta_k: k mismatch
z: longitudinal point for generating poling pattern
crystal_profile: Crystal 3D hologram (if None, ignore)
inference: (True/False) if in inference mode, we include more coefficients in the poling
description for better validation
Returns Periodically poled crystal slab at point z
-------
"""
if crystal_profile is None:
return np.sign(np.cos(np.abs(delta_k) * z))
else:
magnitude = np.abs(crystal_profile)
phase = np.angle(crystal_profile)
if inference is not None:
max_order_fourier = 20
poling = 0
magnitude = magnitude / magnitude.max()
DutyCycle = np.arcsin(magnitude) / np.pi
for m in range(max_order_fourier):
if m == 0:
poling = poling + 2 * DutyCycle - 1
else:
poling = poling + (2 / (m * np.pi)) * \
np.sin(m * pi * DutyCycle) * 2 * np.cos(m * phase + m * np.abs(delta_k) * z)
return poling
else:
return (2 / np.pi) * np.exp(1j * (np.abs(delta_k) * z)) * magnitude * np.exp(1j * phase)
def HermiteBank(
lam,
refractive_index,
W0,
max_mode_x,
max_mode_y,
x,
y,
z=0
):
"""
generates a dictionary of Hermite Gauss basis functions
Parameters
----------
    lam: wavelength
refractive_index: refractive index
W0: beam waist
max_mode_x: maximum projection mode 1st axis
max_mode_y: maximum projection mode 2nd axis
x: transverse points, x axis
y: transverse points, y axis
z: projection longitudinal position
Returns
-------
dictionary of Hermite Gauss basis functions
"""
Hermite_dict = {}
for nx in range(max_mode_x):
for ny in range(max_mode_y):
Hermite_dict[f'|HG{nx}{ny}>'] = Hermite_gauss(lam, refractive_index, W0, nx, ny, z, x, y)
return np.array(list(Hermite_dict.values())), [*Hermite_dict]
def LaguerreBank(
lam,
refractive_index,
W0,
max_mode_p,
max_mode_l,
x,
y,
z=0,
get_dict: bool = False,
):
"""
generates a dictionary of Laguerre Gauss basis functions
Parameters
----------
    lam: wavelength
refractive_index: refractive index
W0: beam waist
max_mode_p: maximum projection mode 1st axis
max_mode_l: maximum projection mode 2nd axis
x: transverse points, x axis
y: transverse points, y axis
z: projection longitudinal position
    get_dict: (True/False) if True, the function will return a dictionary;
              otherwise the dictionary is split into an np.array of the basis functions and a list of the dictionary keys.
Returns
-------
dictionary of Laguerre Gauss basis functions
"""
Laguerre_dict = {}
for p in range(max_mode_p):
for l in range(-max_mode_l, max_mode_l + 1):
Laguerre_dict[f'|LG{p}{l}>'] = Laguerre_gauss(lam, refractive_index, W0, l, p, z, x, y)
if get_dict:
return Laguerre_dict
return np.array(list(Laguerre_dict.values())), [*Laguerre_dict]
def TomographyBankLG(
lam,
refractive_index,
W0,
max_mode_p,
max_mode_l,
x,
y,
z=0,
relative_phase: List[Union[Union[int, float], Any]] = None,
tomography_quantum_state: str = None,
):
"""
    generates a dictionary of basis functions with projections into two orthogonal LG bases and mutually unbiased
bases (MUBs). The MUBs are constructed from superpositions of the two orthogonal LG bases.
according to: https://doi.org/10.1364/AOP.11.000067
Parameters
----------
    lam: wavelength
refractive_index: refractive index
W0: beam waist
max_mode_p: maximum projection mode 1st axis
max_mode_l: maximum projection mode 2nd axis
x: transverse points, x axis
y: transverse points, y axis
z: projection longitudinal position
relative_phase: The relative phase between the mutually unbiased bases (MUBs) states
    tomography_quantum_state: the current quantum state for which we calculate the tomography matrix.
currently we support: qubit/qutrit
Returns
-------
dictionary of bases functions used for constructing the tomography matrix
"""
TOMO_dict = \
LaguerreBank(
lam,
refractive_index,
W0,
max_mode_p,
max_mode_l,
x, y, z,
get_dict=True)
if tomography_quantum_state is QUBIT:
del TOMO_dict['|LG00>']
LG_modes, LG_string = np.array(list(TOMO_dict.values())), [*TOMO_dict]
for m in range(len(TOMO_dict) - 1, -1, -1):
for n in range(m - 1, -1, -1):
for k in range(len(relative_phase)):
TOMO_dict[f'{LG_string[m]}+e^j{str(relative_phase[k]/np.pi)}π{LG_string[n]}'] = \
(1 / np.sqrt(2)) * (LG_modes[m] + np.exp(1j * relative_phase[k]) * LG_modes[n])
return np.array(list(TOMO_dict.values())), [*TOMO_dict]
def TomographyBankHG(
lam,
refractive_index,
W0,
max_mode_x,
max_mode_y,
x,
y,
z=0,
relative_phase: List[Union[Union[int, float], Any]] = None,
tomography_quantum_state: str = None,
):
"""
    generates a dictionary of basis functions with projections into two orthogonal HG bases and mutually unbiased
bases (MUBs). The MUBs are constructed from superpositions of the two orthogonal HG bases.
according to: https://doi.org/10.1364/AOP.11.000067
Parameters
----------
    lam: wavelength
refractive_index: refractive index
W0: beam waist
max_mode_x: maximum projection mode 1st axis
max_mode_y: maximum projection mode 2nd axis
x: transverse points, x axis
y: transverse points, y axis
z: projection longitudinal position
relative_phase: The relative phase between the mutually unbiased bases (MUBs) states
    tomography_quantum_state: the current quantum state for which we calculate the tomography matrix.
currently we support: qubit
Returns
-------
dictionary of bases functions used for constructing the tomography matrix
"""
TOMO_dict = \
HermiteBank(
lam,
refractive_index,
W0,
max_mode_x,
max_mode_y,
x, y, z,
get_dict=True)
if tomography_quantum_state is QUBIT:
del TOMO_dict['|HG00>']
del TOMO_dict['|HG11>']
HG_modes, HG_string = np.array(list(TOMO_dict.values())), [*TOMO_dict]
for m in range(len(TOMO_dict) - 1, -1, -1):
for n in range(m - 1, -1, -1):
for k in range(len(relative_phase)):
TOMO_dict[f'{HG_string[m]}+e^j{str(relative_phase[k]/np.pi)}π{HG_string[n]}'] = \
(1 / np.sqrt(2)) * (HG_modes[m] + np.exp(1j * relative_phase[k]) * HG_modes[n])
return np.array(list(TOMO_dict.values())), [*TOMO_dict]
def Hermite_gauss(lam, refractive_index, W0, nx, ny, z, X, Y, coef=None):
"""
Hermite Gauss in 2D
Parameters
----------
lam: wavelength
refractive_index: refractive index
W0: beam waists
    nx, ny: order of the HG beam
z: the place in z to calculate for
x,y: matrices of x and y
coef
Returns
-------
Hermite-Gaussian beam of order n,m in 2D
"""
k = 2 * np.pi * refractive_index / lam
z0 = np.pi * W0 ** 2 * refractive_index / lam # Rayleigh range
Wz = W0 * np.sqrt(1 + (z / z0) ** 2) # w(z), the variation of the spot size
invR = z / ((z ** 2) + (z0 ** 2)) # radius of curvature
gouy = (nx + ny + 1)*np.arctan(z/z0)
if coef is None:
coefx = np.sqrt(np.sqrt(2/pi) / (2**nx * math.factorial(nx)))
coefy = np.sqrt(np.sqrt(2/pi) / (2**ny * math.factorial(ny)))
coef = coefx * coefy
U = coef * \
(W0/Wz) * np.exp(-(X**2 + Y**2) / Wz**2) * \
HermiteP(nx, np.sqrt(2) * X / Wz) * \
HermiteP(ny, np.sqrt(2) * Y / Wz) * \
np.exp(-1j * (k * (X**2 + Y**2) / 2) * invR) * \
np.exp(1j * gouy)
return U
def Laguerre_gauss(lam, refractive_index, W0, l, p, z, x, y, coef=None):
"""
Laguerre Gauss in 2D
Parameters
----------
lam: wavelength
refractive_index: refractive index
W0: beam waists
l, p: order of the LG beam
z: the place in z to calculate for
x,y: matrices of x and y
coef
Returns
-------
Laguerre-Gaussian beam of order l,p in 2D
"""
k = 2 * np.pi * refractive_index / lam
z0 = np.pi * W0 ** 2 * refractive_index / lam # Rayleigh range
Wz = W0 * np.sqrt(1 + (z / z0) ** 2) # w(z), the variation of the spot size
r = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
invR = z / ((z ** 2) + (z0 ** 2)) # radius of curvature
gouy = (np.abs(l)+2*p+1)*np.arctan(z/z0)
if coef is None:
coef = np.sqrt(2*math.factorial(p)/(np.pi * math.factorial(p + np.abs(l))))
U = coef * \
(W0/Wz)*(r*np.sqrt(2)/Wz)**(np.abs(l)) * \
np.exp(-r**2 / Wz**2) * \
LaguerreP(p, l, 2 * r**2 / Wz**2) * \
np.exp(-1j * (k * r**2 / 2) * invR) * \
np.exp(-1j * l * phi) * \
np.exp(1j * gouy)
return U
def HermiteP(n, x):
"""
Hermite polynomial of rank n Hn(x)
Parameters
----------
    n: order of the Hermite polynomial
x: matrix of x
Returns
-------
Hermite polynomial
"""
if n == 0:
return 1
elif n == 1:
return 2 * x
else:
return 2 * x * HermiteP(n - 1, x) - 2 * (n - 1) * HermiteP(n - 2, x)
def LaguerreP(p, l, x):
"""
Generalized Laguerre polynomial of rank p,l L_p^|l|(x)
Parameters
----------
l, p: order of the LG beam
x: matrix of x
Returns
-------
Generalized Laguerre polynomial
"""
if p == 0:
return 1
elif p == 1:
return 1 + np.abs(l)-x
else:
return ((2*p-1+np.abs(l)-x)*LaguerreP(p-1, l, x) - (p-1+np.abs(l))*LaguerreP(p-2, l, x))/p
class Beam(ABC):
"""
A class that holds everything to do with a beam
"""
def __init__(self,
lam: float,
ctype,
polarization: str,
T: float,
power: float = 0):
"""
Parameters
----------
lam: beam's wavelength
        ctype: function that holds the crystal type, for calculating the refractive index
polarization: Polarization of the beam
T: crystal's temperature [Celsius Degrees]
power: beam power [watt]
"""
self.lam = lam
self.n = ctype(lam * 1e6, T, polarization) # refractive index
self.w = 2 * np.pi * c / lam # frequency
self.k = 2 * np.pi * ctype(lam * 1e6, T, polarization) / lam # wave vector
self.power = power # beam power
class Beam_profile(ABC):
def __init__(
self,
pump_coeffs_real,
pump_coeffs_imag,
waist_pump,
power_pump,
x,
y,
dx,
dy,
max_mode1,
max_mode2,
pump_basis: str,
lam_pump,
refractive_index,
learn_pump_coeffs: bool = False,
learn_pump_waists: bool = False,
z: float = 0.,
):
self.x = x
self.y = y
self.z = z
self.learn_pump_coeffs = learn_pump_coeffs
self.learn_pump_waists = learn_pump_waists
self.learn_pump = learn_pump_coeffs or learn_pump_waists
self.lam_pump = lam_pump
self.pump_basis = pump_basis
self.max_mode1 = max_mode1
self.max_mode2 = max_mode2
self.power = power_pump
self.crystal_dx = dx
self.crystal_dy = dy
self.refractive_index = refractive_index
if not self.learn_pump_coeffs:
self.pump_coeffs_real, \
self.pump_coeffs_imag = pump_coeffs_real, pump_coeffs_imag
if not self.learn_pump_waists:
self.waist_pump = waist_pump
if self.pump_basis.lower() == 'lg': # Laguerre-Gauss
self.coef = np.zeros(len(waist_pump), dtype=np.float32)
idx = 0
for p in range(self.max_mode1):
for l in range(-self.max_mode2, self.max_mode2 + 1):
self.coef = index_update(
self.coef, idx,
np.sqrt(2 * math.factorial(p) / (np.pi * math.factorial(p + np.abs(l))))
)
idx += 1
if not self.learn_pump:
self.E = self._profile_laguerre_gauss(pump_coeffs_real, pump_coeffs_imag, waist_pump)
elif self.pump_basis.lower() == "hg": # Hermite-Gauss
self.coef = np.zeros(len(waist_pump), dtype=np.float32)
idx = 0
for nx in range(self.max_mode1):
for ny in range(self.max_mode2):
self.coef = index_update(
self.coef, idx,
np.sqrt(np.sqrt(2 / pi) / (2 ** nx * math.factorial(nx))) *
np.sqrt(np.sqrt(2 / pi) / (2 ** ny * math.factorial(ny))))
idx += 1
if not self.learn_pump:
self.E = self._profile_hermite_gauss(pump_coeffs_real, pump_coeffs_imag, waist_pump)
def create_profile(self, pump_coeffs_real, pump_coeffs_imag, waist_pump):
if self.learn_pump:
if self.pump_basis.lower() == 'lg': # Laguerre-Gauss
if self.learn_pump_coeffs and self.learn_pump_waists:
self.E = self._profile_laguerre_gauss(
pump_coeffs_real, pump_coeffs_imag, waist_pump
)
elif self.learn_pump_coeffs:
self.E = self._profile_laguerre_gauss(
pump_coeffs_real, pump_coeffs_imag, self.waist_pump
)
else:
self.E = self._profile_laguerre_gauss(
self.pump_coeffs_real, self.pump_coeffs_imag, waist_pump
)
elif self.pump_basis.lower() == 'hg': # Hermite-Gauss
if self.learn_pump_coeffs and self.learn_pump_waists:
self.E = self._profile_hermite_gauss(
pump_coeffs_real, pump_coeffs_imag, waist_pump
)
elif self.learn_pump_coeffs:
self.E = self._profile_hermite_gauss(
pump_coeffs_real, pump_coeffs_imag, self.waist_pump
)
else:
self.E = self._profile_hermite_gauss(
self.pump_coeffs_real, self.pump_coeffs_imag, waist_pump
)
def _profile_laguerre_gauss(
self,
pump_coeffs_real,
pump_coeffs_imag,
waist_pump
):
coeffs = pump_coeffs_real + 1j * pump_coeffs_imag
[X, Y] = np.meshgrid(self.x, self.y)
pump_profile = 0.
idx = 0
for p in range(self.max_mode1):
for l in range(-self.max_mode2, self.max_mode2 + 1):
pump_profile += coeffs[idx] * \
Laguerre_gauss(self.lam_pump, self.refractive_index,
waist_pump[idx] * 1e-5, l, p, self.z, X, Y, self.coef[idx])
idx += 1
pump_profile = fix_power(pump_profile, self.power, self.refractive_index,
self.crystal_dx, self.crystal_dy)[np.newaxis, :, :]
return pump_profile
def _profile_hermite_gauss(
self,
pump_coeffs_real,
pump_coeffs_imag,
waist_pump
):
coeffs = pump_coeffs_real + 1j * pump_coeffs_imag
[X, Y] = np.meshgrid(self.x, self.y)
pump_profile = 0.
idx = 0
for nx in range(self.max_mode1):
for ny in range(self.max_mode2):
pump_profile += coeffs[idx] * \
Hermite_gauss(self.lam_pump, self.refractive_index,
waist_pump[idx] * 1e-5, nx, ny, self.z, X, Y, self.coef[idx])
idx += 1
pump_profile = fix_power(pump_profile, self.power, self.refractive_index,
self.crystal_dx, self.crystal_dy)[np.newaxis, :, :]
return pump_profile
class Crystal_hologram(ABC):
def __init__(
self,
crystal_coeffs_real,
crystal_coeffs_imag,
r_scale,
x,
y,
max_mode1,
max_mode2,
crystal_basis,
lam_signal,
refractive_index,
learn_crystal_coeffs: bool = False,
learn_crystal_waists: bool = False,
z: float = 0.,
):
self.x = x
self.y = y
self.z = z
self.learn_crystal_coeffs = learn_crystal_coeffs
self.learn_crystal_waists = learn_crystal_waists
self.learn_crystal = learn_crystal_coeffs or learn_crystal_waists
self.refractive_index = refractive_index
self.lam_signal = lam_signal
self.crystal_basis = crystal_basis
self.max_mode1 = max_mode1
self.max_mode2 = max_mode2
if not self.learn_crystal_coeffs:
self.crystal_coeffs_real, \
self.crystal_coeffs_imag = crystal_coeffs_real, crystal_coeffs_imag
if not self.learn_crystal_waists:
self.r_scale = r_scale
if crystal_basis.lower() == 'ft': # Fourier-Taylor
if not self.learn_crystal:
self.crystal_profile = self._profile_fourier_taylor(crystal_coeffs_real, crystal_coeffs_imag, r_scale)
elif crystal_basis.lower() == 'fb': # Fourier-Bessel
[X, Y] = np.meshgrid(self.x, self.y)
self.coef = np.zeros(len(r_scale), dtype=np.float32)
idx = 0
for p in range(self.max_mode1):
for l in range(-self.max_mode2, self.max_mode2 + 1):
rad = np.sqrt(X ** 2 + Y ** 2) / (r_scale[idx] * 1e-5)
self.coef = index_update(
self.coef, idx,
sp.jv(0, sp.jn_zeros(0, p + 1)[-1] * rad)
)
idx += 1
if not self.learn_crystal:
self.crystal_profile = self._profile_fourier_bessel(crystal_coeffs_real, crystal_coeffs_imag)
elif crystal_basis.lower() == 'lg': # Laguerre-Gauss
self.coef = np.zeros(len(r_scale), dtype=np.float32)
idx = 0
for p in range(self.max_mode1):
for l in range(-self.max_mode2, self.max_mode2 + 1):
self.coef = index_update(
self.coef, idx,
np.sqrt(2 * math.factorial(p) / (np.pi * math.factorial(p + np.abs(l))))
)
idx += 1
if not self.learn_crystal:
self.crystal_profile = self._profile_laguerre_gauss(crystal_coeffs_real, crystal_coeffs_imag, r_scale)
elif crystal_basis.lower() == 'hg': # Hermite-Gauss
self.coef = np.zeros(len(r_scale), dtype=np.float32)
idx = 0
for m in range(self.max_mode1):
for n in range(self.max_mode2):
self.coef = index_update(
self.coef, idx,
np.sqrt(np.sqrt(2 / pi) / (2 ** m * math.factorial(m))) *
np.sqrt(np.sqrt(2 / pi) / (2 ** n * math.factorial(n)))
)
idx += 1
if not self.learn_crystal:
self.crystal_profile = self._profile_hermite_gauss(crystal_coeffs_real, crystal_coeffs_imag, r_scale)
def create_profile(
self,
crystal_coeffs_real,
crystal_coeffs_imag,
r_scale,
):
if self.learn_crystal:
if self.crystal_basis.lower() == 'ft': # Fourier-Taylor
if self.learn_crystal_coeffs and self.learn_crystal_waists:
self.crystal_profile = self._profile_fourier_taylor(
crystal_coeffs_real, crystal_coeffs_imag, r_scale
)
elif self.learn_crystal_coeffs:
self.crystal_profile = self._profile_fourier_taylor(
crystal_coeffs_real, crystal_coeffs_imag, self.r_scale
)
else:
self.crystal_profile = self._profile_fourier_taylor(
self.crystal_coeffs_real, self.crystal_coeffs_imag, r_scale
)
elif self.crystal_basis.lower() == 'fb': # Fourier-Bessel
if self.learn_crystal_coeffs:
self.crystal_profile = self._profile_fourier_bessel(
crystal_coeffs_real, crystal_coeffs_imag
)
else:
self.crystal_profile = self._profile_fourier_bessel(
self.crystal_coeffs_real, self.crystal_coeffs_imag
)
elif self.crystal_basis.lower() == 'lg': # Laguerre-Gauss
if self.learn_crystal_coeffs and self.learn_crystal_waists:
self.crystal_profile = self._profile_laguerre_gauss(
crystal_coeffs_real, crystal_coeffs_imag, r_scale
)
elif self.learn_crystal_coeffs:
self.crystal_profile = self._profile_laguerre_gauss(
crystal_coeffs_real, crystal_coeffs_imag, self.r_scale
)
else:
self.crystal_profile = self._profile_laguerre_gauss(
self.crystal_coeffs_real, self.crystal_coeffs_imag, r_scale
)
elif self.crystal_basis.lower() == 'hg': # Hermite-Gauss
if self.learn_crystal_coeffs and self.learn_crystal_waists:
self.crystal_profile = self._profile_hermite_gauss(
crystal_coeffs_real, crystal_coeffs_imag, r_scale
)
elif self.learn_crystal_coeffs:
self.crystal_profile = self._profile_hermite_gauss(
crystal_coeffs_real, crystal_coeffs_imag, self.r_scale
)
else:
self.crystal_profile = self._profile_hermite_gauss(
self.crystal_coeffs_real, self.crystal_coeffs_imag, r_scale
)
def _profile_fourier_taylor(
self,
crystal_coeffs_real,
crystal_coeffs_imag,
r_scale,
):
coeffs = crystal_coeffs_real + 1j * crystal_coeffs_imag
[X, Y] = np.meshgrid(self.x, self.y)
phi_angle = np.arctan2(Y, X)
crystal_profile = 0.
idx = 0
for p in range(self.max_mode1):
for l in range(-self.max_mode2, self.max_mode2 + 1):
rad = np.sqrt(X**2 + Y**2) / (r_scale[idx] * 1e-5)
crystal_profile += coeffs[idx] * rad**p * np.exp(-rad**2) * np.exp(-1j * l * phi_angle)
idx += 1
return crystal_profile
def _profile_fourier_bessel(
self,
crystal_coeffs_real,
crystal_coeffs_imag,
):
coeffs = crystal_coeffs_real + 1j * crystal_coeffs_imag
[X, Y] = np.meshgrid(self.x, self.y)
phi_angle = np.arctan2(Y, X)
crystal_profile = 0.
idx = 0
for p in range(self.max_mode1):
for l in range(-self.max_mode2, self.max_mode2 + 1):
crystal_profile += coeffs[idx] * self.coef[idx] * np.exp(-1j * l * phi_angle)
idx += 1
return crystal_profile
def _profile_laguerre_gauss(
self,
crystal_coeffs_real,
crystal_coeffs_imag,
r_scale,
):
coeffs = crystal_coeffs_real + 1j * crystal_coeffs_imag
[X, Y] = np.meshgrid(self.x, self.y)
idx = 0
crystal_profile = 0.
for p in range(self.max_mode1):
for l in range(-self.max_mode2, self.max_mode2 + 1):
crystal_profile += coeffs[idx] * \
Laguerre_gauss(self.lam_signal, self.refractive_index,
r_scale[idx] * 1e-5, l, p, self.z, X, Y, self.coef[idx])
idx += 1
return crystal_profile
def _profile_hermite_gauss(
self,
crystal_coeffs_real,
crystal_coeffs_imag,
r_scale,
):
coeffs = crystal_coeffs_real + 1j * crystal_coeffs_imag
[X, Y] = np.meshgrid(self.x, self.y)
idx = 0
crystal_profile = 0.
for m in range(self.max_mode1):
for n in range(self.max_mode2):
crystal_profile += coeffs[idx] * \
Hermite_gauss(self.lam_signal, self.refractive_index,
r_scale[idx] * 1e-5, m, n, self.z, X, Y, self.coef[idx])
idx += 1
return crystal_profile
def fix_power(
A,
power,
n,
dx,
dy
):
"""
    The function takes a field A and normalizes it to have the indicated power
Parameters
----------
A
power
n
dx
dy
Returns
-------
"""
output = A * np.sqrt(power) / np.sqrt(Power2D(A, n, dx, dy))
return output
class DensMat(ABC):
"""
A class that holds tomography dimensions and
tensors used for calculating qubit and qutrit tomography
"""
def __init__(
self,
projection_n_state2,
tomography_dimension
):
assert tomography_dimension in [2, 3], "tomography_dimension must be 2 or 3, " \
f"got {tomography_dimension}"
self.projection_n_state2 = projection_n_state2
self.tomography_dimension = tomography_dimension
self.rotation_mats, self.masks = self.dens_mat_tensors()
def dens_mat_tensors(
self
):
rot_mats_tensor = np.zeros([self.tomography_dimension ** 4,
self.tomography_dimension ** 2,
self.tomography_dimension ** 2],
dtype='complex64')
masks_tensor = np.zeros([self.tomography_dimension ** 4,
self.projection_n_state2,
self.projection_n_state2],
dtype='complex64')
if self.tomography_dimension == 2:
mats = (
np.eye(2, dtype='complex64'),
np.array([[0, 1], [1, 0]]),
np.array([[0, -1j], [1j, 0]]),
np.array([[1, 0], [0, -1]])
)
vecs = (
np.array([1, 1, 0, 0, 0, 0]),
np.array([0, 0, 1, -1, 0, 0]),
np.array([0, 0, 0, 0, 1, -1]),
np.array([1, -1, 0, 0, 0, 0])
)
else: # tomography_dimension == 3
mats = (
np.eye(3, dtype='complex64'),
np.array([[1, 0, 0], [0, -1, 0], [0, 0, 0]]),
np.array([[0, 1, 0], [1, 0, 0], [0, 0, 0]]),
np.array([[0, -1j, 0], [1j, 0, 0], [0, 0, 0]]),
np.array([[0, 0, 1], [0, 0, 0], [1, 0, 0]]),
np.array([[0, 0, -1j], [0, 0, 0], [1j, 0, 0]]),
np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0]]),
np.array([[0, 0, 0], [0, 0, -1j], [0, 1j, 0]]),
(1 / np.sqrt(3)) * np.array([[1, 0, 0], [0, 1, 0], [0, 0, -2]])
)
vecs = (
np.array([1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
np.array([1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
np.array([0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),
np.array([0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0, 0, 0]),
np.array([0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0, 0, 0]),
np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0, 0, 0]),
np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1, 0, 0]),
np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, -1]),
(np.sqrt(3) / 3) * np.array([1, 1, -2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
)
counter = 0
for m in range(self.tomography_dimension ** 2):
for n in range(self.tomography_dimension ** 2):
norm1 = np.trace(mats[m] @ mats[m])
norm2 = np.trace(mats[n] @ mats[n])
mat1 = mats[m] / norm1
mat2 = mats[n] / norm2
rot_mats_tensor = index_add(rot_mats_tensor, index[counter, :, :], np.kron(mat1, mat2))
mask = np.dot(vecs[m].reshape(self.projection_n_state2, 1),
np.transpose((vecs[n]).reshape(self.projection_n_state2, 1)))
masks_tensor = index_add(masks_tensor, index[counter, :, :], mask)
counter = counter + 1
return rot_mats_tensor, masks_tensor
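A sketch of constructing the qubit-case tomography tensors (this usage is inferred from the constructor above, not taken from the original project; projection_n_state2=6 matches the six-element mask vectors of the two-dimensional case):

from spdc_inv.utils.utils import DensMat

# Qubit tomography: 2x2 density matrix, 6 projection states per photon.
dm = DensMat(projection_n_state2=6, tomography_dimension=2)

print(dm.rotation_mats.shape)  # (16, 4, 4): one Kronecker product per pair of Pauli-type matrices
print(dm.masks.shape)          # (16, 6, 6): which projection pairs contribute to each term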
| 17,909
| 10
| 368
|
bd07df6570fc0aa352615daf76a0ee8b62298953
| 1,484
|
py
|
Python
|
test_scripts/test_LSST_sim.py
|
jakevdp/spheredb
|
e5e5ff8b8902459b3f38a1a413a712ac1695accc
|
[
"BSD-3-Clause"
] | 1
|
2021-08-29T06:01:28.000Z
|
2021-08-29T06:01:28.000Z
|
test_scripts/test_LSST_sim.py
|
jakevdp/spheredb
|
e5e5ff8b8902459b3f38a1a413a712ac1695accc
|
[
"BSD-3-Clause"
] | null | null | null |
test_scripts/test_LSST_sim.py
|
jakevdp/spheredb
|
e5e5ff8b8902459b3f38a1a413a712ac1695accc
|
[
"BSD-3-Clause"
] | 2
|
2018-08-03T20:27:35.000Z
|
2021-08-29T06:01:30.000Z
|
"""
Testing WCS projections on LSST simulation files
"""
import os, sys
sys.path.append(os.path.abspath('..'))
import numpy as np
import matplotlib.pyplot as plt
from spheredb.get_data import\
get_stripe82_file, all_lsst_exposures, get_LSST_file
from spheredb.conversions import FITS_to_HPX, HPX_grid_step
from spheredb.util import regrid
import os
import pyfits
import re
import datetime
# Note: USE INSERT NOT MERGE!!!!
if 1:
from scidbpy import interface
sdb = interface.SciDBShimInterface('http://vega.cs.washington.edu:8080')
Nside = 2 ** 16 #19
hdulist = get_LSST_file()
output = FITS_to_HPX(hdulist[1].header, hdulist[1].data, Nside,
return_sparse=True)
print output.shape
RA_range = (output.row.min(), output.row.max())
DEC_range = (output.col.min(), output.col.max())
dRA = RA_range[1] - RA_range[0]
dDEC = DEC_range[1] - DEC_range[0]
RA_range = (RA_range[0] - 1 * dRA, RA_range[1] + 1 * dRA)
DEC_range = (DEC_range[0] - 1 * dDEC, DEC_range[1] + 1 * dDEC)
arr = sdb.from_sparse(output)
subarr = arr[RA_range[0]:RA_range[1],
DEC_range[0]:DEC_range[1]]
plt.imshow(np.log(subarr.toarray()), cmap=plt.cm.binary)
plt.show()
elif 1:
times = [hdulist[1].header['TAI'] for hdulist in all_lsst_exposures()]
times = np.asarray(times)
times.sort()
print times.min()
print times.max()
plt.plot(24 * (times - 50095), '.k')
plt.show()
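The RA/DEC window above is cut out of a SciDB array built from the sparse HEALPix grid. The same cut-out can be sanity-checked locally with SciPy alone (a sketch; `output` is assumed to be the scipy.sparse COO matrix returned by FITS_to_HPX with return_sparse=True, as suggested by the .row/.col attributes used above):

import numpy as np
import matplotlib.pyplot as plt

r0, r1 = output.row.min(), output.row.max()
c0, c1 = output.col.min(), output.col.max()

# COO matrices do not support slicing; convert to CSR first.
window = output.tocsr()[r0:r1 + 1, c0:c1 + 1].toarray()

plt.imshow(np.log(window + 1e-12), cmap=plt.cm.binary)
plt.show()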
| 24.733333
| 76
| 0.657682
|
"""
Testing WCS projections on LSST simulation files
"""
import os, sys
sys.path.append(os.path.abspath('..'))
import numpy as np
import matplotlib.pyplot as plt
from spheredb.get_data import\
get_stripe82_file, all_lsst_exposures, get_LSST_file
from spheredb.conversions import FITS_to_HPX, HPX_grid_step
from spheredb.util import regrid
import os
import pyfits
import re
import datetime
# Note: USE INSERT NOT MERGE!!!!
if 1:
from scidbpy import interface
sdb = interface.SciDBShimInterface('http://vega.cs.washington.edu:8080')
Nside = 2 ** 16 #19
hdulist = get_LSST_file()
output = FITS_to_HPX(hdulist[1].header, hdulist[1].data, Nside,
return_sparse=True)
print output.shape
RA_range = (output.row.min(), output.row.max())
DEC_range = (output.col.min(), output.col.max())
dRA = RA_range[1] - RA_range[0]
dDEC = DEC_range[1] - DEC_range[0]
RA_range = (RA_range[0] - 1 * dRA, RA_range[1] + 1 * dRA)
DEC_range = (DEC_range[0] - 1 * dDEC, DEC_range[1] + 1 * dDEC)
arr = sdb.from_sparse(output)
subarr = arr[RA_range[0]:RA_range[1],
DEC_range[0]:DEC_range[1]]
plt.imshow(np.log(subarr.toarray()), cmap=plt.cm.binary)
plt.show()
elif 1:
times = [hdulist[1].header['TAI'] for hdulist in all_lsst_exposures()]
times = np.asarray(times)
times.sort()
print times.min()
print times.max()
plt.plot(24 * (times - 50095), '.k')
plt.show()
| 0
| 0
| 0
|
dc6f460bce02b737f3c3be063882b7ec784ca799
| 2,982
|
py
|
Python
|
tests/pytests/functional/conftest.py
|
GLaN1K/salt
|
ec1a907465c2d6dff126b747a52035e19b9a105b
|
[
"Apache-2.0"
] | null | null | null |
tests/pytests/functional/conftest.py
|
GLaN1K/salt
|
ec1a907465c2d6dff126b747a52035e19b9a105b
|
[
"Apache-2.0"
] | 4
|
2021-02-06T14:30:48.000Z
|
2021-12-13T20:50:10.000Z
|
tests/pytests/functional/conftest.py
|
GLaN1K/salt
|
ec1a907465c2d6dff126b747a52035e19b9a105b
|
[
"Apache-2.0"
] | 1
|
2021-05-10T13:59:33.000Z
|
2021-05-10T13:59:33.000Z
|
import logging
import shutil
import pytest
import salt.features
import salt.loader
import salt.pillar
log = logging.getLogger(__name__)
@pytest.fixture(scope="package")
@pytest.fixture(scope="module")
@pytest.fixture(scope="module")
@pytest.fixture(scope="module")
@pytest.fixture(scope="module")
@pytest.fixture(autouse=True)
| 26.157895
| 84
| 0.639504
|
import logging
import shutil
import pytest
import salt.features
import salt.loader
import salt.pillar
log = logging.getLogger(__name__)
class Loaders:
def __init__(self, opts):
self.opts = opts
self.context = {}
self._reset_state_funcs = [self.context.clear]
self._grains = self._utils = self._modules = self._pillar = None
self.opts["grains"] = self.grains
self.refresh_pillar()
salt.features.setup_features(self.opts)
def reset_state(self):
for func in self._reset_state_funcs:
func()
@property
def grains(self):
if self._grains is None:
self._grains = salt.loader.grains(self.opts, context=self.context)
return self._grains
@property
def utils(self):
if self._utils is None:
self._utils = salt.loader.utils(self.opts, context=self.context)
return self._utils
@property
def modules(self):
if self._modules is None:
self._modules = salt.loader.minion_mods(
self.opts, context=self.context, utils=self.utils, initial_load=True
)
return self._modules
@property
def pillar(self):
if self._pillar is None:
self._pillar = salt.pillar.get_pillar(
self.opts,
self.opts["grains"],
self.opts["id"],
saltenv=self.opts["saltenv"],
pillarenv=self.opts.get("pillarenv"),
).compile_pillar()
return self._pillar
def refresh_pillar(self):
self._pillar = None
self.opts["pillar"] = self.pillar
@pytest.fixture(scope="package")
def minion_id():
return "func-tests-minion"
@pytest.fixture(scope="module")
def state_tree(tmp_path_factory):
state_tree_path = tmp_path_factory.mktemp("state-tree-base")
try:
yield state_tree_path
finally:
shutil.rmtree(str(state_tree_path), ignore_errors=True)
@pytest.fixture(scope="module")
def state_tree_prod(tmp_path_factory):
state_tree_path = tmp_path_factory.mktemp("state-tree-prod")
try:
yield state_tree_path
finally:
shutil.rmtree(str(state_tree_path), ignore_errors=True)
@pytest.fixture(scope="module")
def minion_opts(
salt_factories, minion_id, state_tree, state_tree_prod,
):
config_overrides = {
"file_client": "local",
"file_roots": {"base": [str(state_tree)], "prod": [str(state_tree_prod)]},
"features": {"enable_slsvars_fixes": True},
}
factory = salt_factories.get_salt_minion_daemon(
minion_id, config_overrides=config_overrides,
)
return factory.config.copy()
@pytest.fixture(scope="module")
def loaders(minion_opts):
return Loaders(minion_opts)
@pytest.fixture(autouse=True)
def reset_loaders_state(loaders):
try:
# Run the tests
yield
finally:
# Reset the loaders state
loaders.reset_state()
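A minimal example of a functional test module that would consume these fixtures (a sketch, not part of the original tree; test.ping is a standard Salt execution-module function that simply returns True):

# e.g. tests/pytests/functional/test_example.py (hypothetical file name)


def test_loaders_can_call_execution_modules(loaders):
    # loaders.modules is the minion's execution-module loader
    assert loaders.modules["test.ping"]() is True


def test_pillar_is_a_dict(loaders):
    assert isinstance(loaders.pillar, dict)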
| 2,248
| 237
| 155
|
2a064a6e84bc4fb785bd64064f8eb65382a5cafd
| 2,931
|
py
|
Python
|
using_sklearn.py
|
mzmmoazam/Titanic-Dataset
|
58704877afc3c23b88b29a575c04af09afc94af2
|
[
"MIT"
] | null | null | null |
using_sklearn.py
|
mzmmoazam/Titanic-Dataset
|
58704877afc3c23b88b29a575c04af09afc94af2
|
[
"MIT"
] | null | null | null |
using_sklearn.py
|
mzmmoazam/Titanic-Dataset
|
58704877afc3c23b88b29a575c04af09afc94af2
|
[
"MIT"
] | null | null | null |
import csv
import numpy as np
### I had only used numpy, as this was when I had just dived into
### this universe.
### So, for beginners: go look at pandas; you will reduce the lines of code, and it
### is awesome!!
### read data
with open('../Data/train.csv') as f:
reader = csv.reader(f, delimiter=',')
data = []
for row in reader:
data.append(row)
### labels
data_headers = data[0]
### get some fields only
for i in ["Name", "PassengerId", "Survived", "Ticket", "Fare", "Cabin"]:
data_headers.remove(i)
### preprocessing and encoding
data = np.array(data[1:])
data = np.delete(data, [0, 3], 1)
order = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Embarked']
data[data == ''] = '01111110'
train_result = data[:, 0]
data = np.delete(data, [0, 6], 1)
data = np.delete(data, 5, 1)
data = np.delete(data, 5, 1)
print(data_headers, data[0])
data[data == "male"] = 0
data[data == "female"] = 1
data[data == "S"] = 1
data[data == "Q"] = 0
data[data == "C"] = 2
### using various classifiers
# from sklearn.naive_bayes import GaussianNB
# clf=GaussianNB()
# from sklearn.tree import DecisionTreeClassifier
# clf=DecisionTreeClassifier()
# from sklearn.svm import SVC
# clf=SVC()
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
clf = RandomForestClassifier(max_depth=5, n_estimators=10, max_features=2)
# clf=AdaBoostClassifier()
# from sklearn.neighbors import KNeighborsClassifier
# clf=KNeighborsClassifier()
# print(np.array(['','1']).astype(np.float),"jsbd")
# print(len(data.astype(np.float)),"############",len(train_result.astype(np.float)))
### fit data to classifier
clf.fit(data.astype(np.float), train_result.astype(np.float))
# Testing data
data = []
with open('../Data/test.csv') as f:
reader = csv.reader(f, delimiter=',')
data = []
for row in reader:
data.append(row)
# print(len(data))
data_headers = data[0]
### preprocessing for test data
for i in ["Name", "PassengerId", "Ticket", "Fare", "Cabin"]:
data = np.delete(data, data_headers.index(i), 1)
data_headers.remove(i)
data = np.array(data[1:])
data[data == ''] = '01111110'
data[data == "male"] = 0
data[data == "female"] = 1
data[data == "S"] = 1
data[data == "Q"] = 0
data[data == "C"] = 2
# print(len(data),len(order),data,"end data")
test_data = np.array(data[:, data_headers.index(order[0])])
for i in order[1:]:
test_data = np.vstack((test_data, data[:, data_headers.index(i)]))
# print(data_headers,"jdbfue",test_data,"jdbueb")
with open('../Data/gender_submission.csv') as f:
reader = csv.reader(f, delimiter=',')
test_labels = []
for row in reader:
test_labels.append(row[1])
print(len(test_labels))
test_labels = np.array(test_labels[1:])
ans = clf.predict(test_data.astype(np.float).T)
ans1 = np.array([range(892, 1310)])
ans = np.vstack((ans1.astype(np.int), ans.astype(np.int))).T
np.savetxt("fo1o.csv", ans, delimiter=",", fmt='%d')
| 27.914286
| 85
| 0.657455
|
import csv
import numpy as np
### I had only used numpy , as this is when I had just dived in
### this universe
### So , for beginners go search for pandas you will reduce the lines of code and it
### is awesome !!
### read data
with open('../Data/train.csv') as f:
reader = csv.reader(f, delimiter=',')
data = []
for row in reader:
data.append(row)
### labels
data_headers = data[0]
### get some fields only
for i in ["Name", "PassengerId", "Survived", "Ticket", "Fare", "Cabin"]:
data_headers.remove(i)
### preprocessing and encoding
data = np.array(data[1:])
data = np.delete(data, [0, 3], 1)
order = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Embarked']
data[data == ''] = '01111110'
train_result = data[:, 0]
data = np.delete(data, [0, 6], 1)
data = np.delete(data, 5, 1)
data = np.delete(data, 5, 1)
print(data_headers, data[0])
data[data == "male"] = 0
data[data == "female"] = 1
data[data == "S"] = 1
data[data == "Q"] = 0
data[data == "C"] = 2
### using various classifiers
# from sklearn.naive_bayes import GaussianNB
# clf=GaussianNB()
# from sklearn.tree import DecisionTreeClassifier
# clf=DecisionTreeClassifier()
# from sklearn.svm import SVC
# clf=SVC()
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
clf = RandomForestClassifier(max_depth=5, n_estimators=10, max_features=2)
# clf=AdaBoostClassifier()
# from sklearn.neighbors import KNeighborsClassifier
# clf=KNeighborsClassifier()
# print(np.array(['','1']).astype(np.float),"jsbd")
# print(len(data.astype(np.float)),"############",len(train_result.astype(np.float)))
### fit data to classifier
clf.fit(data.astype(np.float), train_result.astype(np.float))
# Testing data
data = []
with open('../Data/test.csv') as f:
reader = csv.reader(f, delimiter=',')
data = []
for row in reader:
data.append(row)
# print(len(data))
data_headers = data[0]
### preprocessing for test data
for i in ["Name", "PassengerId", "Ticket", "Fare", "Cabin"]:
data = np.delete(data, data_headers.index(i), 1)
data_headers.remove(i)
data = np.array(data[1:])
data[data == ''] = '01111110'
data[data == "male"] = 0
data[data == "female"] = 1
data[data == "S"] = 1
data[data == "Q"] = 0
data[data == "C"] = 2
# print(len(data),len(order),data,"end data")
test_data = np.array(data[:, data_headers.index(order[0])])
for i in order[1:]:
test_data = np.vstack((test_data, data[:, data_headers.index(i)]))
# print(data_headers,"jdbfue",test_data,"jdbueb")
with open('../Data/gender_submission.csv') as f:
reader = csv.reader(f, delimiter=',')
test_labels = []
for row in reader:
test_labels.append(row[1])
print(len(test_labels))
test_labels = np.array(test_labels[1:])
ans = clf.predict(test_data.astype(np.float).T)
ans1 = np.array([range(892, 1310)])
ans = np.vstack((ans1.astype(np.int), ans.astype(np.int))).T
np.savetxt("fo1o.csv", ans, delimiter=",", fmt='%d')
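As the comments above suggest, pandas shortens this preprocessing considerably. Below is a rough sketch of the same pipeline, not a drop-in replacement: column names follow the standard Kaggle Titanic CSV and the file path simply reuses the one above.
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
train = pd.read_csv('../Data/train.csv')
features = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Embarked']
X = train[features].copy()
X['Sex'] = X['Sex'].map({'male': 0, 'female': 1})
X['Embarked'] = X['Embarked'].map({'S': 1, 'Q': 0, 'C': 2})
X = X.fillna(X.median(numeric_only=True))  # stand-in for the '01111110' sentinel used above
y = train['Survived']
clf = RandomForestClassifier(max_depth=5, n_estimators=10, max_features=2)
clf.fit(X, y)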
| 0
| 0
| 0
|
8453c193927c760ea206d0589cb5c1335098bc78
| 244
|
py
|
Python
|
ltime/util.py
|
fanjindong/ltime
|
c17025ceb4ed0e423162e3c302fc777fcdada7df
|
[
"MIT"
] | null | null | null |
ltime/util.py
|
fanjindong/ltime
|
c17025ceb4ed0e423162e3c302fc777fcdada7df
|
[
"MIT"
] | null | null | null |
ltime/util.py
|
fanjindong/ltime
|
c17025ceb4ed0e423162e3c302fc777fcdada7df
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
| 15.25
| 38
| 0.614754
|
from __future__ import absolute_import
def is_timestamp(value):
if type(value) == bool:
return False
try:
float(value)
return True
except:
return False
def isstr(s):
return isinstance(s, str)
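A few illustrative checks for the two helpers above (the values are chosen arbitrarily):
assert is_timestamp('1609459200.5') is True   # parses as a float
assert is_timestamp(True) is False            # booleans are rejected explicitly
assert isstr('ltime') is True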
| 157
| 0
| 46
|
b127bdb8be952e1a6b5245cf98323de492eec89b
| 1,653
|
py
|
Python
|
nanotune/model/node.py
|
jenshnielsen/nanotune
|
0f2a252d1986f9a5ff155fad626658f85aec3f3e
|
[
"MIT"
] | 5
|
2021-02-24T14:32:37.000Z
|
2022-01-05T16:37:26.000Z
|
nanotune/model/node.py
|
jenshnielsen/nanotune
|
0f2a252d1986f9a5ff155fad626658f85aec3f3e
|
[
"MIT"
] | 149
|
2021-03-23T14:44:39.000Z
|
2022-03-31T06:09:07.000Z
|
nanotune/model/node.py
|
jenshnielsen/nanotune
|
0f2a252d1986f9a5ff155fad626658f85aec3f3e
|
[
"MIT"
] | 10
|
2021-03-29T13:36:38.000Z
|
2022-02-16T23:06:35.000Z
|
import logging
from typing import Optional
import qcodes as qc
from qcodes import Instrument, InstrumentChannel, Parameter
from qcodes import validators as vals
from qcodes.instrument.base import InstrumentBase
from qcodes.utils.validators import Validator
import nanotune as nt
logger = logging.getLogger(__name__)
| 23.956522
| 59
| 0.553539
|
import logging
from typing import Optional
import qcodes as qc
from qcodes import Instrument, InstrumentChannel, Parameter
from qcodes import validators as vals
from qcodes.instrument.base import InstrumentBase
from qcodes.utils.validators import Validator
import nanotune as nt
logger = logging.getLogger(__name__)
class Node(InstrumentChannel):
def __init__(
self,
parent: InstrumentBase,
name: str = "node",
label: str = "",
node_type: Optional[str] = None,
n_init: int = 0,
v_init: float = 0,
):
super().__init__(parent, name)
self.add_parameter(
"node_type",
label="node type " + label,
unit=None,
get_cmd=None,
set_cmd=None,
initial_value=node_type,
vals=vals.Strings(),
)
self.add_parameter(
"n",
label="number of charges " + label,
unit=None,
set_cmd=self._set_n,
get_cmd=self._get_n,
initial_value=n_init,
vals=vals.Ints(-100000, 100000),
)
self.add_parameter(
"v",
label="voltage node " + label,
unit="V",
set_cmd=self._set_v,
get_cmd=self._get_v,
initial_value=v_init,
vals=vals.Numbers(-100000, 100000),
)
def _set_n(self, new_N: int) -> None:
self._n = new_N
def _get_n(self) -> int:
return self._n
def _set_v(self, new_V: int) -> None:
self._v = new_V
def _get_v(self) -> float:
return self._v
| 1,167
| 9
| 157
|
fc6aba255f9187115a00a25541285fac525573c7
| 8,015
|
py
|
Python
|
notebooks/md/data/md_equil/6KZD/md_equil.py
|
openkinome/study-ntrk-resistance
|
3c4ba64538ec55c774d34c9d7cd0a5e4665dc9d5
|
[
"MIT"
] | null | null | null |
notebooks/md/data/md_equil/6KZD/md_equil.py
|
openkinome/study-ntrk-resistance
|
3c4ba64538ec55c774d34c9d7cd0a5e4665dc9d5
|
[
"MIT"
] | 1
|
2021-07-30T15:01:53.000Z
|
2021-08-02T09:48:08.000Z
|
notebooks/md/data/md_equil/6KZD/md_equil.py
|
openkinome/study-ntrk-resistance
|
3c4ba64538ec55c774d34c9d7cd0a5e4665dc9d5
|
[
"MIT"
] | null | null | null |
import argparse
import os
import sys
from sys import stdout
import mdtraj as md
import numpy as np
import parmed
import simtk.openmm as mm
import simtk.openmm.app as app
import simtk.unit as unit
from openforcefield.topology import Molecule, Topology
from openmmforcefields.generators import SystemGenerator
from perses.utils.openeye import OEMol_to_omm_ff, createOEMolFromSDF
from simtk.openmm import MonteCarloBarostat, XmlSerializer
from simtk.openmm.app import CheckpointReporter, ForceField, PDBFile
from simtk.openmm.app.pdbreporter import PDBReporter
from simtk.openmm.app.statedatareporter import StateDataReporter
# Read arguments to get ligand
parser = argparse.ArgumentParser()
parser.add_argument(
"-ligand",
help="the docked ligand to be prepared for simulation",
choices=["larotrectinib", "selitrectinib", "repotrectinib"],
type=str,
)
args = parser.parse_args()
chosen_ligand = args.ligand
# Parameters
print("--> Reading parameters")
pressure = 1.0 * unit.bar
temperature = 300 * unit.kelvin
nonbonded_method = app.PME
constraints = app.HBonds
remove_cm_motion = True
collision_rate = 1.0 / unit.picoseconds
timestep = 0.002 * unit.picoseconds
solvent_padding = 10.0 * unit.angstrom
ionic_strength = 150 * unit.millimolar
# Forcefield
protein_forcefield = "amber14/protein.ff14SB.xml"
small_molecule_forcefield = "openff-1.1.0"
solvation_forcefield = "amber14/tip3p.xml"
forcefield = ForceField(protein_forcefield, solvation_forcefield)
# Set steps and frequencies
nsteps = 2500000 # 5 ns
report_freq = 100
chk_freq = 500
traj_freq = 1000 # 2500 frames
# Set the input file names
input_pdb = "6KZD_prepped.pdb"
input_ligands_sdf = "../../structures_from_docking/6KZD_chemgauss_docking.sdf"
# Create output directory
output_prefix = "./output/" + chosen_ligand
os.makedirs(output_prefix, exist_ok=True)
print("--> Directory ", output_prefix, " created ")
# Set file names
integrator_xml_filename = "integrator_2fs.xml"
state_xml_filename = "equilibrated_state_5ns.xml"
state_pdb_filename = "equilibrated_state_5ns.pdb"
system_xml_filename = "equilibrated_system_5ns.xml"
checkpoint_filename = "equilibrated_checkpoint_5ns.chk"
traj_output_filename = "equilibrated_traj_5ns.xtc"
# Define the barostat for the system
barostat = mm.MonteCarloBarostat(pressure, temperature)
# Load and sort ligands
molecules = Molecule.from_file(input_ligands_sdf)
ligand_names = ["larotrectinib", "selitrectinib", "repotrectinib"]
ligand_dict = dict(zip(ligand_names, molecules)) # Create dict for easy access later
# Make the SystemGenerator
system_generator = SystemGenerator(
forcefields=[protein_forcefield, solvation_forcefield],
barostat=barostat,
periodic_forcefield_kwargs={"nonbondedMethod": app.PME},
small_molecule_forcefield=small_molecule_forcefield,
molecules=ligand_dict[chosen_ligand],
)
# Read in the PDB and create an OpenMM topology
pdbfile = app.PDBFile(input_pdb)
protein_topology, protein_positions = pdbfile.topology, pdbfile.positions
# Add ligand to topology - credit to @hannahbrucemacdonald for help here
print("--> Combining protein and ligand topologies")
off_ligand_topology = Topology.from_molecules(ligand_dict[chosen_ligand])
ligand_topology = off_ligand_topology.to_openmm()
ligand_positions = ligand_dict[chosen_ligand].conformers[0]
md_protein_topology = md.Topology.from_openmm(
protein_topology
) # using mdtraj for protein top
md_ligand_topology = md.Topology.from_openmm(
ligand_topology
) # using mdtraj for ligand top
md_complex_topology = md_protein_topology.join(md_ligand_topology) # add them together
complex_topology = md_complex_topology.to_openmm() # now back to openmm
total_atoms = len(protein_positions) + len(ligand_positions)
complex_positions = unit.Quantity(np.zeros([total_atoms, 3]), unit=unit.nanometers)
complex_positions[0 : len(protein_positions)] = protein_positions
for index, atom in enumerate(ligand_positions, len(protein_positions)):
coords = atom / atom.unit
complex_positions[index] = (
coords / 10.0
) * unit.nanometers # since openmm works in nm
# Add hydrogens and solvate the system
modeller = app.Modeller(complex_topology, complex_positions)
print("Adding hydrogens to the system...")
modeller.addHydrogens(system_generator.forcefield)
print("Solvating the system...")
modeller.addSolvent(
forcefield=system_generator.forcefield,
model="tip3p",
ionicStrength=ionic_strength,
padding=solvent_padding,
)
# Create an OpenMM system
print("--> Creating an OpenMM system")
system = system_generator.create_system(modeller.topology)
# Make and serialize integrator - Langevin dynamics
print(
"Serializing integrator to %s"
% os.path.join(output_prefix, integrator_xml_filename)
)
integrator = mm.LangevinIntegrator(
temperature, collision_rate, timestep # Friction coefficient
)
with open(os.path.join(output_prefix, integrator_xml_filename), "w") as outfile:
xml = mm.XmlSerializer.serialize(integrator)
outfile.write(xml)
# Define the platform to use; CUDA, OpenCL, CPU, or Reference. Or do not specify
# the platform to use the default (fastest) platform
# platform = mm.Platform.getPlatformByName("OpenCL")
# prop = dict(OpenCLPrecision="mixed") # Use mixed single/double precision
# Create the Simulation object
sim = app.Simulation(modeller.topology, system, integrator) # , platform, prop)
# Set the particle positions
sim.context.setPositions(modeller.positions)
# Minimize the energy
print("--> Minimising energy with docked ligand: " + chosen_ligand)
print(
" initial : %8.3f kcal/mol"
% (
sim.context.getState(getEnergy=True).getPotentialEnergy()
/ unit.kilocalories_per_mole
)
)
sim.minimizeEnergy()
print(
" final : %8.3f kcal/mol"
% (
sim.context.getState(getEnergy=True).getPotentialEnergy()
/ unit.kilocalories_per_mole
)
)
# set starting velocities:
print("--> Generating random starting velocities")
sim.context.setVelocitiesToTemperature(temperature * unit.kelvin)
# write limited state information to standard out:
sim.reporters.append(
StateDataReporter(
stdout,
reportInterval=report_freq,
step=True,
time=True,
potentialEnergy=True,
kineticEnergy=True,
temperature=True,
speed=True,
progress=True,
remainingTime=True,
totalSteps=nsteps,
separator="\t",
)
)
# Write to checkpoint files regularly:
sim.reporters.append(
CheckpointReporter(
file=os.path.join(output_prefix, checkpoint_filename), reportInterval=chk_freq
)
)
# Write out the trajectory
sim.reporters.append(
md.reporters.XTCReporter(
file=os.path.join(output_prefix, traj_output_filename), reportInterval=traj_freq
)
)
# Run NPT dynamics
print("--> Running dynamics in the NPT ensemble for the 6KZD:" + chosen_ligand + " complex")
sim.step(nsteps)
# Save and serialize the final state
print("--> Serializing state to %s" % os.path.join(output_prefix, state_xml_filename))
state = sim.context.getState(
getPositions=True, getVelocities=True, getEnergy=True, getForces=True
)
with open(os.path.join(output_prefix, state_xml_filename), "w") as outfile:
xml = mm.XmlSerializer.serialize(state)
outfile.write(xml)
# Save the final state as a PDB
print("--> Saving final state as %s" % os.path.join(output_prefix, state_pdb_filename))
with open(os.path.join(output_prefix, state_pdb_filename), "w") as outfile:
PDBFile.writeFile(
sim.topology,
sim.context.getState(getPositions=True, enforcePeriodicBox=True).getPositions(),
file=outfile,
keepIds=True,
)
# Save and serialize system
print("--> Serializing system to %s" % os.path.join(output_prefix, system_xml_filename))
system.setDefaultPeriodicBoxVectors(*state.getPeriodicBoxVectors())
with open(os.path.join(output_prefix, system_xml_filename), "w") as outfile:
xml = mm.XmlSerializer.serialize(system)
outfile.write(xml)
| 33.676471
| 92
| 0.76282
|
import argparse
import os
import sys
from sys import stdout
import mdtraj as md
import numpy as np
import parmed
import simtk.openmm as mm
import simtk.openmm.app as app
import simtk.unit as unit
from openforcefield.topology import Molecule, Topology
from openmmforcefields.generators import SystemGenerator
from perses.utils.openeye import OEMol_to_omm_ff, createOEMolFromSDF
from simtk.openmm import MonteCarloBarostat, XmlSerializer
from simtk.openmm.app import CheckpointReporter, ForceField, PDBFile
from simtk.openmm.app.pdbreporter import PDBReporter
from simtk.openmm.app.statedatareporter import StateDataReporter
# Read arguments to get ligand
parser = argparse.ArgumentParser()
parser.add_argument(
"-ligand",
help="the docked ligand to be prepared for simulation",
choices=["larotrectinib", "selitrectinib", "repotrectinib"],
type=str,
)
args = parser.parse_args()
chosen_ligand = args.ligand
# Parameters
print("--> Reading parameters")
pressure = 1.0 * unit.bar
temperature = 300 * unit.kelvin
nonbonded_method = app.PME
constraints = app.HBonds
remove_cm_motion = True
collision_rate = 1.0 / unit.picoseconds
timestep = 0.002 * unit.picoseconds
solvent_padding = 10.0 * unit.angstrom
ionic_strength = 150 * unit.millimolar
# Forcefield
protein_forcefield = "amber14/protein.ff14SB.xml"
small_molecule_forcefield = "openff-1.1.0"
solvation_forcefield = "amber14/tip3p.xml"
forcefield = ForceField(protein_forcefield, solvation_forcefield)
# Set steps and frequencies
nsteps = 2500000 # 5 ns
report_freq = 100
chk_freq = 500
traj_freq = 1000 # 2500 frames
# Set the input file names
input_pdb = "6KZD_prepped.pdb"
input_ligands_sdf = "../../structures_from_docking/6KZD_chemgauss_docking.sdf"
# Create output directory
output_prefix = "./output/" + chosen_ligand
os.makedirs(output_prefix, exist_ok=True)
print("--> Directory ", output_prefix, " created ")
# Set file names
integrator_xml_filename = "integrator_2fs.xml"
state_xml_filename = "equilibrated_state_5ns.xml"
state_pdb_filename = "equilibrated_state_5ns.pdb"
system_xml_filename = "equilibrated_system_5ns.xml"
checkpoint_filename = "equilibrated_checkpoint_5ns.chk"
traj_output_filename = "equilibrated_traj_5ns.xtc"
# Define the barostat for the system
barostat = mm.MonteCarloBarostat(pressure, temperature)
# Load and sort ligands
molecules = Molecule.from_file(input_ligands_sdf)
ligand_names = ["larotrectinib", "selitrectinib", "repotrectinib"]
ligand_dict = dict(zip(ligand_names, molecules)) # Create dict for easy access later
# Make the SystemGenerator
system_generator = SystemGenerator(
forcefields=[protein_forcefield, solvation_forcefield],
barostat=barostat,
periodic_forcefield_kwargs={"nonbondedMethod": app.PME},
small_molecule_forcefield=small_molecule_forcefield,
molecules=ligand_dict[chosen_ligand],
)
# Read in the PDB and create an OpenMM topology
pdbfile = app.PDBFile(input_pdb)
protein_topology, protein_positions = pdbfile.topology, pdbfile.positions
# Add ligand to topology - credit to @hannahbrucemacdonald for help here
print("--> Combining protein and ligand topologies")
off_ligand_topology = Topology.from_molecules(ligand_dict[chosen_ligand])
ligand_topology = off_ligand_topology.to_openmm()
ligand_positions = ligand_dict[chosen_ligand].conformers[0]
md_protein_topology = md.Topology.from_openmm(
protein_topology
) # using mdtraj for protein top
md_ligand_topology = md.Topology.from_openmm(
ligand_topology
) # using mdtraj for ligand top
md_complex_topology = md_protein_topology.join(md_ligand_topology) # add them together
complex_topology = md_complex_topology.to_openmm() # now back to openmm
total_atoms = len(protein_positions) + len(ligand_positions)
complex_positions = unit.Quantity(np.zeros([total_atoms, 3]), unit=unit.nanometers)
complex_positions[0 : len(protein_positions)] = protein_positions
for index, atom in enumerate(ligand_positions, len(protein_positions)):
coords = atom / atom.unit
complex_positions[index] = (
coords / 10.0
) * unit.nanometers # since openmm works in nm
# Add hydrogens and solvate the system
modeller = app.Modeller(complex_topology, complex_positions)
print("Adding hydrogens to the system...")
modeller.addHydrogens(system_generator.forcefield)
print("Solvating the system...")
modeller.addSolvent(
forcefield=system_generator.forcefield,
model="tip3p",
ionicStrength=ionic_strength,
padding=solvent_padding,
)
# Create an OpenMM system
print("--> Creating an OpenMM system")
system = system_generator.create_system(modeller.topology)
# Make and serialize integrator - Langevin dynamics
print(
"Serializing integrator to %s"
% os.path.join(output_prefix, integrator_xml_filename)
)
integrator = mm.LangevinIntegrator(
temperature, collision_rate, timestep # Friction coefficient
)
with open(os.path.join(output_prefix, integrator_xml_filename), "w") as outfile:
xml = mm.XmlSerializer.serialize(integrator)
outfile.write(xml)
# Define the platform to use; CUDA, OpenCL, CPU, or Reference. Or do not specify
# the platform to use the default (fastest) platform
# platform = mm.Platform.getPlatformByName("OpenCL")
# prop = dict(OpenCLPrecision="mixed") # Use mixed single/double precision
# Create the Simulation object
sim = app.Simulation(modeller.topology, system, integrator) # , platform, prop)
# Set the particle positions
sim.context.setPositions(modeller.positions)
# Minimize the energy
print("--> Minimising energy with docked ligand: " + chosen_ligand)
print(
" initial : %8.3f kcal/mol"
% (
sim.context.getState(getEnergy=True).getPotentialEnergy()
/ unit.kilocalories_per_mole
)
)
sim.minimizeEnergy()
print(
" final : %8.3f kcal/mol"
% (
sim.context.getState(getEnergy=True).getPotentialEnergy()
/ unit.kilocalories_per_mole
)
)
# set starting velocities:
print("--> Generating random starting velocities")
sim.context.setVelocitiesToTemperature(temperature * unit.kelvin)
# write limited state information to standard out:
sim.reporters.append(
StateDataReporter(
stdout,
reportInterval=report_freq,
step=True,
time=True,
potentialEnergy=True,
kineticEnergy=True,
temperature=True,
speed=True,
progress=True,
remainingTime=True,
totalSteps=nsteps,
separator="\t",
)
)
# Write to checkpoint files regularly:
sim.reporters.append(
CheckpointReporter(
file=os.path.join(output_prefix, checkpoint_filename), reportInterval=chk_freq
)
)
# Write out the trajectory
sim.reporters.append(
md.reporters.XTCReporter(
file=os.path.join(output_prefix, traj_output_filename), reportInterval=traj_freq
)
)
# Run NPT dynamics
print("--> Running dynamics in the NPT ensemble for the 6KZD:" + chosen_ligand + " complex")
sim.step(nsteps)
# Save and serialize the final state
print("--> Serializing state to %s" % os.path.join(output_prefix, state_xml_filename))
state = sim.context.getState(
getPositions=True, getVelocities=True, getEnergy=True, getForces=True
)
with open(os.path.join(output_prefix, state_xml_filename), "w") as outfile:
xml = mm.XmlSerializer.serialize(state)
outfile.write(xml)
# Save the final state as a PDB
print("--> Saving final state as %s" % os.path.join(output_prefix, state_pdb_filename))
with open(os.path.join(output_prefix, state_pdb_filename), "w") as outfile:
PDBFile.writeFile(
sim.topology,
sim.context.getState(getPositions=True, enforcePeriodicBox=True).getPositions(),
file=outfile,
keepIds=True,
)
# Save and serialize system
print("--> Serializing system to %s" % os.path.join(output_prefix, system_xml_filename))
system.setDefaultPeriodicBoxVectors(*state.getPeriodicBoxVectors())
with open(os.path.join(output_prefix, system_xml_filename), "w") as outfile:
xml = mm.XmlSerializer.serialize(system)
outfile.write(xml)
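A hedged sketch of how the serialized artifacts written above could be reloaded to continue the run; the directory layout and file names reuse the variables defined in the script, and the chosen ligand is an arbitrary example.
import os
import simtk.openmm as mm
import simtk.openmm.app as app
prefix = './output/larotrectinib'  # any of the three ligands prepared above
with open(os.path.join(prefix, 'equilibrated_system_5ns.xml')) as f:
    system = mm.XmlSerializer.deserialize(f.read())
with open(os.path.join(prefix, 'integrator_2fs.xml')) as f:
    integrator = mm.XmlSerializer.deserialize(f.read())
pdb = app.PDBFile(os.path.join(prefix, 'equilibrated_state_5ns.pdb'))
sim = app.Simulation(pdb.topology, system, integrator)
sim.loadCheckpoint(os.path.join(prefix, 'equilibrated_checkpoint_5ns.chk'))
sim.step(1000)  # short continuation as a smoke test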
| 0
| 0
| 0
|
e215f99357e8f5180f8fadd116c52486463e20bc
| 910
|
py
|
Python
|
app/models/answerselect.py
|
garybake/quizmaster
|
f5f03b9a70823cbcd0299fc1e21f52347292d136
|
[
"MIT"
] | null | null | null |
app/models/answerselect.py
|
garybake/quizmaster
|
f5f03b9a70823cbcd0299fc1e21f52347292d136
|
[
"MIT"
] | 1
|
2021-03-23T19:18:36.000Z
|
2021-03-23T19:18:36.000Z
|
app/models/answerselect.py
|
garybake/quizmaster
|
f5f03b9a70823cbcd0299fc1e21f52347292d136
|
[
"MIT"
] | null | null | null |
"""Holds AnswerSelect model."""
import datetime
from .. import db
class AnswerSelect(db.Model):
"""Model to hold a users selected answers."""
__tablename__ = "answerselects"
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
quiz_id = db.Column(db.Integer, db.ForeignKey("quizzes.id"))
question_id = db.Column(db.Integer, db.ForeignKey("questions.id"))
answer_id = db.Column(db.Integer, db.ForeignKey("answers.id"))
created_date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
user = db.relationship("User", backref="answerselects", lazy=True)
answer = db.relationship("Answer", backref="answerselects", lazy=True)
def __repr__(self):
"""Return string readable version of model."""
return "<AnswerSelect {}:{}>".format(
self.user.name,
self.answer.text)
| 32.5
| 75
| 0.672527
|
"""Holds AnswerSelect model."""
import datetime
from .. import db
class AnswerSelect(db.Model):
"""Model to hold a users selected answers."""
__tablename__ = "answerselects"
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
quiz_id = db.Column(db.Integer, db.ForeignKey("quizzes.id"))
question_id = db.Column(db.Integer, db.ForeignKey("questions.id"))
answer_id = db.Column(db.Integer, db.ForeignKey("answers.id"))
created_date = db.Column(db.DateTime, default=datetime.datetime.utcnow)
user = db.relationship("User", backref="answerselects", lazy=True)
answer = db.relationship("Answer", backref="answerselects", lazy=True)
def __repr__(self):
"""Return string readable version of model."""
return "<AnswerSelect {}:{}>".format(
self.user.name,
self.answer.text)
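A hypothetical way a request handler might record a selection with this model; the ids are placeholders and an active Flask-SQLAlchemy session is assumed.
selection = AnswerSelect(user_id=1, quiz_id=2, question_id=3, answer_id=4)
db.session.add(selection)
db.session.commit()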
| 0
| 0
| 0
|
556f8642ca306c2f5a199d9fbb2db0764833a1bb
| 490
|
py
|
Python
|
src/osparc_function_services/cli.py
|
pcrespov/osparc-function-services
|
68be5bf2cad181f703488c2f358efa668f3ac3c5
|
[
"MIT"
] | null | null | null |
src/osparc_function_services/cli.py
|
pcrespov/osparc-function-services
|
68be5bf2cad181f703488c2f358efa668f3ac3c5
|
[
"MIT"
] | null | null | null |
src/osparc_function_services/cli.py
|
pcrespov/osparc-function-services
|
68be5bf2cad181f703488c2f358efa668f3ac3c5
|
[
"MIT"
] | null | null | null |
import typer
from .app import run_as_service
from .sensitivity_ua_services import (
sensitivity_ua_linear_regression,
sensitivity_ua_test_func,
)
from .demo_services import demo_func
main = typer.Typer()
@main.command()
@main.command()
@main.command()
if __name__ == "__main__":
main()
| 16.333333
| 52
| 0.763265
|
import typer
from .app import run_as_service
from .sensitivity_ua_services import (
sensitivity_ua_linear_regression,
sensitivity_ua_test_func,
)
from .demo_services import demo_func
main = typer.Typer()
@main.command()
def test_func():
run_as_service(sensitivity_ua_test_func)
@main.command()
def linear_regression():
run_as_service(sensitivity_ua_linear_regression)
@main.command()
def demo():
run_as_service(demo_func)
if __name__ == "__main__":
main()
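One hedged way to exercise these commands without a shell is typer's test runner; the import path mirrors the module's own imports above, and the chosen command is arbitrary (it would dispatch to run_as_service, so this is illustration only).
from typer.testing import CliRunner
from osparc_function_services.cli import main
runner = CliRunner()
result = runner.invoke(main, ['demo'])  # dispatches to run_as_service(demo_func)
print(result.exit_code)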
| 116
| 0
| 66
|
04870301bfda3a6c2bb0147c54606c0bf498888d
| 158
|
py
|
Python
|
src/ros_vision_interaction/src/vision-interaction/vision_project_tools/engine_statedb.py
|
HaaaO/vision-project
|
72256af07834195cfe52ac344aee5effcd0da978
|
[
"MIT"
] | null | null | null |
src/ros_vision_interaction/src/vision-interaction/vision_project_tools/engine_statedb.py
|
HaaaO/vision-project
|
72256af07834195cfe52ac344aee5effcd0da978
|
[
"MIT"
] | 21
|
2020-09-09T18:55:58.000Z
|
2021-07-26T19:42:46.000Z
|
src/ros_vision_interaction/src/vision-interaction/vision_project_tools/engine_statedb.py
|
HaaaO/vision-project
|
72256af07834195cfe52ac344aee5effcd0da978
|
[
"MIT"
] | 6
|
2020-12-20T17:19:29.000Z
|
2021-08-09T22:33:04.000Z
|
from mongodb_statedb import StateDb
| 19.75
| 61
| 0.721519
|
from mongodb_statedb import StateDb
class EngineStateDb(StateDb):
def is_set(self, key):
return self.exists(key) and self.get(key) is not None
| 63
| 8
| 50
|
a02a7d46d66be767da8b325d1fb24538b4225035
| 631
|
py
|
Python
|
engine/__init__.py
|
ZenithClown/DQNProjects
|
3a9f022166022509fce0f4306ed5612854d5539f
|
[
"Apache-2.0"
] | null | null | null |
engine/__init__.py
|
ZenithClown/DQNProjects
|
3a9f022166022509fce0f4306ed5612854d5539f
|
[
"Apache-2.0"
] | null | null | null |
engine/__init__.py
|
ZenithClown/DQNProjects
|
3a9f022166022509fce0f4306ed5612854d5539f
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
"""
DeepQ Learning is a Reinforcement Learning Platform where AI Learns to Play Games
In this project, I'm trying to develop some 'Q-Learning Algorithms' where the
neural network will learn to play various games. The `engine` is specifically
designed to build games that can be used to train and test the models. The game
engines are also built such that an user can self play without any overhead.
List of Games Available:
1. Classic Snake Game (`snake.py`)
@author: Debmalya Pramanik
@Contact: dPramanik.official@gmail.com
"""
# init-time options registrations
from .snake import * # noqa: F403
| 31.55
| 81
| 0.759113
|
# -*- encoding: utf-8 -*-
"""
DeepQ Learning is a Reinforcement Learning Platform where AI Learns to Play Games
In this project, I'm trying to develop some 'Q-Learning Algorithms' where the
neural network will learn to play various games. The `engine` is specifically
designed to build games that can be used to train and test the models. The game
engines are also built such that an user can self play without any overhead.
List of Games Available:
1. Classic Snake Game (`snake.py`)
@author: Debmalya Pramanik
@Contact: dPramanik.official@gmail.com
"""
# init-time options registrations
from .snake import * # noqa: F403
| 0
| 0
| 0
|
ef1655c61f13f3ef2c6142375d3cac6ee69db26b
| 623
|
py
|
Python
|
ocrd_utils/ocrd_utils/constants.py
|
wrznr/pyocrd
|
25c4dd8c60285b7877803e2b627d72c8c0a4ab1e
|
[
"Apache-2.0"
] | null | null | null |
ocrd_utils/ocrd_utils/constants.py
|
wrznr/pyocrd
|
25c4dd8c60285b7877803e2b627d72c8c0a4ab1e
|
[
"Apache-2.0"
] | null | null | null |
ocrd_utils/ocrd_utils/constants.py
|
wrznr/pyocrd
|
25c4dd8c60285b7877803e2b627d72c8c0a4ab1e
|
[
"Apache-2.0"
] | null | null | null |
"""
Constants for ocrd_utils.
"""
from pkg_resources import get_distribution
__all__ = [
'VERSION',
'MIMETYPE_PAGE',
'EXT_TO_MIME',
'MIME_TO_EXT'
]
VERSION = get_distribution('ocrd_utils').version
MIMETYPE_PAGE = 'application/vnd.prima.page+xml'
EXT_TO_MIME = {
'.tif': 'image/tiff',
'.tiff': 'image/tiff',
'.png': 'image/png',
'.jpg': 'image/jpg',
'.jpeg': 'image/jpg',
'.xml': MIMETYPE_PAGE
}
MIME_TO_EXT = {
'image/tiff': '.tif',
'image/png': '.png',
'image/jpg': '.jpg',
'image/jpeg': '.jpg',
MIMETYPE_PAGE: '.xml',
'application/alto+xml': '.xml',
}
| 18.323529
| 48
| 0.592295
|
"""
Constants for ocrd_utils.
"""
from pkg_resources import get_distribution
__all__ = [
'VERSION',
'MIMETYPE_PAGE',
'EXT_TO_MIME',
'MIME_TO_EXT'
]
VERSION = get_distribution('ocrd_utils').version
MIMETYPE_PAGE = 'application/vnd.prima.page+xml'
EXT_TO_MIME = {
'.tif': 'image/tiff',
'.tiff': 'image/tiff',
'.png': 'image/png',
'.jpg': 'image/jpg',
'.jpeg': 'image/jpg',
'.xml': MIMETYPE_PAGE
}
MIME_TO_EXT = {
'image/tiff': '.tif',
'image/png': '.png',
'image/jpg': '.jpg',
'image/jpeg': '.jpg',
MIMETYPE_PAGE: '.xml',
'application/alto+xml': '.xml',
}
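A small, hypothetical helper built on the tables above, just to show the intended lookup direction; the filename is illustrative.
from pathlib import Path
def mimetype_for(path):
    # Fall back to None for extensions the tables do not cover.
    return EXT_TO_MIME.get(Path(path).suffix.lower(), None)
print(mimetype_for('OCR-D-IMG_0001.tif'))  # -> image/tiff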
| 0
| 0
| 0
|
043080aacfee1d363e6e1340bafc54ef3bea32a5
| 3,267
|
py
|
Python
|
VectorUtils/vector3.py
|
Monkvy/VectorUtils
|
880149aa8b67dd6b0a527eafd58a5ef05f97e911
|
[
"MIT"
] | null | null | null |
VectorUtils/vector3.py
|
Monkvy/VectorUtils
|
880149aa8b67dd6b0a527eafd58a5ef05f97e911
|
[
"MIT"
] | null | null | null |
VectorUtils/vector3.py
|
Monkvy/VectorUtils
|
880149aa8b67dd6b0a527eafd58a5ef05f97e911
|
[
"MIT"
] | null | null | null |
from logging import exception
from math import sqrt
from random import uniform
| 30.820755
| 80
| 0.543618
|
from logging import exception
from math import sqrt
from random import uniform
class Vector3:
def __init__(self, x: int | float, y: int | float, z: int | float):
self.x = x
self.y = y
self.z = z
@staticmethod
def random(min: float=-1, max: float=1):
'''
Create an Vector3 object with random values between min and max
'''
return Vector3(uniform(min, max), uniform(min, max), uniform(min, max))
@staticmethod
def fromTuple(tuple: tuple):
'''
Create an Vector3 object with a tuple
'''
return Vector3(tuple[0], tuple[1], tuple[2])
def normalize(self):
mag = self.getMag()
if mag > 0:
self.x /= mag
self.y /= mag
self.z /= mag
def getMag(self):
return sqrt(self.x**2 + self.y**2 + self.z**2)
def setMag(self, magnitude):
newX = self.x * magnitude / self.getMag()
newY = self.y * magnitude / self.getMag()
newZ = self.z * magnitude / self.getMag()
self.x = newX
self.y = newY
self.z = newZ
def toTuple(self):
return self.x, self.y, self.z
def toInt(self):
return Vector3(int(self.x), int(self.y), int(self.z))
def toFloat(self):
return Vector3(float(self.x), float(self.y), float(self.z))
def round(self, n=0):
return Vector3(round(self.x, n), round(self.y, n), round(self.z, n))
def combineToList(self, other):
'''
Returns self and other as a list combined
Args:
other(Vector3 | list | tuple) - the other vector or list to combine
'''
vectors = [self.x, self.y, self.z]
        if isinstance(other, Vector3):
            vectors.append(other.x)
            vectors.append(other.y)
            vectors.append(other.z)
        elif isinstance(other, (list, tuple)):
            for i in other:
                if isinstance(i, (int, float)):
                    vectors.append(i)
                else:
                    raise TypeError(f'{i} is not a valid type')
        return vectors
    def __repr__(self):
        return f'Vector3({self.x}, {self.y}, {self.z})'
    def __add__(self, other):
        if isinstance(other, Vector3):
            return Vector3(self.x + other.x, self.y + other.y, self.z + other.z)
        elif isinstance(other, (int, float)):
            return Vector3(self.x + other, self.y + other, self.z + other)
    def __sub__(self, other):
        if isinstance(other, Vector3):
            return Vector3(self.x - other.x, self.y - other.y, self.z - other.z)
        elif isinstance(other, (int, float)):
            return Vector3(self.x - other, self.y - other, self.z - other)
    def __mul__(self, other):
        if isinstance(other, Vector3):
            return Vector3(self.x * other.x, self.y * other.y, self.z * other.z)
        elif isinstance(other, (int, float)):
            return Vector3(self.x * other, self.y * other, self.z * other)
    def __truediv__(self, other):
        if isinstance(other, Vector3):
            return Vector3(self.x / other.x, self.y / other.y, self.z / other.z)
        elif isinstance(other, (int, float)):
            return Vector3(self.x / other, self.y / other, self.z / other)
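A quick usage sketch for the class above; the values are arbitrary.
a = Vector3(3.0, 4.0, 0.0)
b = Vector3.random()        # components drawn uniformly from [-1, 1]
print(a.getMag())           # -> 5.0
a.normalize()               # a now has unit magnitude
print((a + b).round(3))     # element-wise sum, rounded for display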
| 1,733
| 1,432
| 23
|
5ec13ba80a976c7b8c28df2901d282722275851c
| 3,661
|
py
|
Python
|
zygrader/ui/templates.py
|
natecraddock/zygrader
|
3a1d5c1dbe76c8f76c2a99f271a26b2ec873006a
|
[
"MIT"
] | 5
|
2019-11-15T17:42:42.000Z
|
2021-04-20T19:35:25.000Z
|
zygrader/ui/templates.py
|
natecraddock/zygrader
|
3a1d5c1dbe76c8f76c2a99f271a26b2ec873006a
|
[
"MIT"
] | 76
|
2020-02-22T01:42:16.000Z
|
2021-04-28T18:47:20.000Z
|
zygrader/ui/templates.py
|
cs142ta/zygrader
|
8ae500ced4cb6f9bcc8a2fd637d8f8dc7a8a9607
|
[
"MIT"
] | 2
|
2020-02-21T04:39:38.000Z
|
2021-04-20T19:35:20.000Z
|
"""UI Templates: For reusable ui pieces built from components."""
from zygrader import ui
from zygrader.zybooks import Zybooks
def filename_input(purpose, text=""):
"""Get a valid filename from the user"""
window = ui.get_window()
path_input = ui.layers.PathInputLayer("Filepath Entry")
path_input.set_prompt(
[f"Enter the path and filename for {purpose} [~ is supported]"])
path_input.set_text(text)
window.run_layer(path_input)
if path_input.canceled:
return None
return path_input.get_path()
| 38.135417
| 85
| 0.565966
|
"""UI Templates: For reusable ui pieces built from components."""
from zygrader import ui
from zygrader.zybooks import Zybooks
class ZybookSectionSelector:
def __init__(self, allow_optional_and_hidden=False):
self.window = ui.get_window()
self.zy_api = Zybooks()
self.allow_optional_and_hidden = allow_optional_and_hidden
def is_allowed(self, section):
return self.allow_optional_and_hidden or (not (section["hidden"]
or section["optional"]))
class _SectionToggle(ui.layers.Toggle):
def __init__(self, index, data):
super().__init__()
self.__index = index
self.__data = data
self.get()
def get(self):
self._toggled = self.__data[self.__index]
def toggle(self):
self.__data[self.__index] = not self.__data[self.__index]
self.get()
def select_zybook_sections(self, return_just_numbers=False, title_extra=""):
self.zybooks_toc = self.zy_api.get_table_of_contents()
if not self.zybooks_toc:
return None
self.zybooks_sections = {(chapter["number"], section["number"]): section
for chapter in self.zybooks_toc
for section in chapter["sections"]}
selected_sections = {(chapter["number"], section["number"]): False
for chapter in self.zybooks_toc
for section in chapter["sections"]}
title = ("Select zyBooks Sections"
if not title_extra else f"{title_extra} - Select Sections")
chapter_pad_width = len(str(len(self.zybooks_toc)))
section_pad_width = max([
len(str(len(chapter["sections"]))) for chapter in self.zybooks_toc
])
popup = ui.layers.ListLayer(title, popup=True)
popup.set_exit_text("Done")
for i, chapter in enumerate(self.zybooks_toc, 1):
row = popup.add_row_parent(
f"{str(chapter['number']):>{chapter_pad_width}} - {chapter['title']}"
)
for j, section in enumerate(chapter["sections"], 1):
section_string = (f"{chapter['number']}"
f".{section['number']:<{section_pad_width}}"
f" - {section['title']}")
if not self.is_allowed(section):
section_string += " (Optional)"
subrow = row.add_row_toggle(
section_string,
ZybookSectionSelector._SectionToggle((i, j),
selected_sections))
# Disable selection of optional sections
if not self.is_allowed(section):
subrow.set_disabled()
self.window.run_layer(popup)
res = []
for section_numbers, selected in selected_sections.items():
if selected:
if return_just_numbers:
res.append(section_numbers)
else:
res.append(self.zybooks_sections[section_numbers])
return res
def filename_input(purpose, text=""):
"""Get a valid filename from the user"""
window = ui.get_window()
path_input = ui.layers.PathInputLayer("Filepath Entry")
path_input.set_prompt(
[f"Enter the path and filename for {purpose} [~ is supported]"])
path_input.set_text(text)
window.run_layer(path_input)
if path_input.canceled:
return None
return path_input.get_path()
| 2,865
| 224
| 23
|
f21eccfc340c873b740e8a9c2712d3502fd1e059
| 926
|
py
|
Python
|
algorithms/sorting/selection_sort.py
|
EthanVieira/CLRS-solutions
|
c20bb7e4b9f26eef62bbd32caae27637e94a4a19
|
[
"MIT"
] | null | null | null |
algorithms/sorting/selection_sort.py
|
EthanVieira/CLRS-solutions
|
c20bb7e4b9f26eef62bbd32caae27637e94a4a19
|
[
"MIT"
] | null | null | null |
algorithms/sorting/selection_sort.py
|
EthanVieira/CLRS-solutions
|
c20bb7e4b9f26eef62bbd32caae27637e94a4a19
|
[
"MIT"
] | null | null | null |
if __name__ == "__main__":
print(selection_sort([]))
print(selection_sort([1]))
print(selection_sort([0, 100000000, 20000]))
print(selection_sort([1, 0]))
print(selection_sort([1.5, -2.6, 2, 1.1]))
print(selection_sort([3, 6, 8, 1, 2, 5, 3, 9, 3, 5, 9, 2]))
print(selection_sort([3, 6, -45, 1, 2, 5, 3, -9, 3, 0, 9, 2]))
print(selection_sort([3, 6, -45, 1, 2, 5, 3, -9,
3, 0, 9, 2], comparison=lambda a, b: a > b))
print(selection_sort(["hello", "apple", "cat", "zebra", "goat", ""]))
| 37.04
| 73
| 0.536717
|
def selection_sort(items: list, comparison=lambda a, b: a < b):
for i in range(len(items[:-1])):
index = i
candidate = items[i]
for j, item2 in enumerate(items[i:]):
if comparison(item2, candidate):
candidate = item2
index = j + i
items[i], items[index] = items[index], items[i]
return items
if __name__ == "__main__":
print(selection_sort([]))
print(selection_sort([1]))
print(selection_sort([0, 100000000, 20000]))
print(selection_sort([1, 0]))
print(selection_sort([1.5, -2.6, 2, 1.1]))
print(selection_sort([3, 6, 8, 1, 2, 5, 3, 9, 3, 5, 9, 2]))
print(selection_sort([3, 6, -45, 1, 2, 5, 3, -9, 3, 0, 9, 2]))
print(selection_sort([3, 6, -45, 1, 2, 5, 3, -9,
3, 0, 9, 2], comparison=lambda a, b: a > b))
print(selection_sort(["hello", "apple", "cat", "zebra", "goat", ""]))
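The `comparison` hook also lets the same routine order arbitrary records; a small illustrative sketch with made-up data:
people = [{'name': 'b', 'age': 30}, {'name': 'a', 'age': 25}]
print(selection_sort(people, comparison=lambda a, b: a['age'] < b['age']))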
| 355
| 0
| 22
|
f3f7bd514a45063730bb6565c9ca529d17a83da6
| 1,261
|
py
|
Python
|
src/uvm/dap/uvm_dap.py
|
rodrigomelo9/uvm-python
|
e3127eba2cc1519a61dc6f736d862a8dcd6fce20
|
[
"Apache-2.0"
] | 140
|
2020-01-18T00:14:17.000Z
|
2022-03-29T10:57:24.000Z
|
src/uvm/dap/uvm_dap.py
|
Mohsannaeem/uvm-python
|
1b8768a1358d133465ede9cadddae651664b1d53
|
[
"Apache-2.0"
] | 24
|
2020-01-18T18:40:58.000Z
|
2021-03-25T17:39:07.000Z
|
src/uvm/dap/uvm_dap.py
|
Mohsannaeem/uvm-python
|
1b8768a1358d133465ede9cadddae651664b1d53
|
[
"Apache-2.0"
] | 34
|
2020-01-18T12:22:59.000Z
|
2022-02-11T07:03:11.000Z
|
#//
#//------------------------------------------------------------------------------
#// Copyright 2007-2011 Mentor Graphics Corporation
#// Copyright 2007-2011 Cadence Design Systems, Inc.
#// Copyright 2010-2011 Synopsys, Inc.
#// Copyright 2013 NVIDIA Corporation
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#//------------------------------------------------------------------------------
#
#`ifndef UVM_DAP_SVH
# `define UVM_DAP_SVH
#
#// Set/Get DAPS
# `include "dap/uvm_set_get_dap_base.svh"
# `include "dap/uvm_simple_lock_dap.svh"
# `include "dap/uvm_get_to_lock_dap.svh"
# `include "dap/uvm_set_before_get_dap.svh"
#
#`endif // UVM_DAP_SVH
#
| 36.028571
| 81
| 0.613799
|
#//
#//------------------------------------------------------------------------------
#// Copyright 2007-2011 Mentor Graphics Corporation
#// Copyright 2007-2011 Cadence Design Systems, Inc.
#// Copyright 2010-2011 Synopsys, Inc.
#// Copyright 2013 NVIDIA Corporation
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#//------------------------------------------------------------------------------
#
#`ifndef UVM_DAP_SVH
# `define UVM_DAP_SVH
#
#// Set/Get DAPS
# `include "dap/uvm_set_get_dap_base.svh"
# `include "dap/uvm_simple_lock_dap.svh"
# `include "dap/uvm_get_to_lock_dap.svh"
# `include "dap/uvm_set_before_get_dap.svh"
#
#`endif // UVM_DAP_SVH
#
| 0
| 0
| 0
|
4f960221db6ab2cc64d1f2d013d6065459c08ab2
| 2,845
|
py
|
Python
|
service/houston.py
|
sola1993/inmoov
|
34e7bb6e214bd9bf3eee808c19f0ab09ec79345f
|
[
"Apache-2.0"
] | 1
|
2021-02-24T17:05:52.000Z
|
2021-02-24T17:05:52.000Z
|
src/resource/Python/examples/houston.py
|
DarkRebel/myrobotlab
|
b8cf60776bdb4aef2d7dd763dfcdcf8ef99bc955
|
[
"Apache-2.0"
] | null | null | null |
src/resource/Python/examples/houston.py
|
DarkRebel/myrobotlab
|
b8cf60776bdb4aef2d7dd763dfcdcf8ef99bc955
|
[
"Apache-2.0"
] | 1
|
2020-06-03T20:48:47.000Z
|
2020-06-03T20:48:47.000Z
|
# Houston
# Connects a serial device on Windows this would COMx
# Sets the board type
# Then starts polling analog pin 17 which is Analog pin 3
# You will need MRLComm.ino loaded on the Arduino
# If all goes well - you should have 2 traces running
# in the arduino->oscope tab - you can at this point connect
# input - for example a 5v line to the lines and see them change
from time import sleep
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from org.myrobotlab.service import Motor
# variables dependent on your setup
boardType = "atmega2560" # atmega168 | atmega328p | atmega2560 | atmega1280 | atmega32u4
#comPort = "/dev/ttyACM0"
#comPort = "COM9"
lfaencoder = 38
analogSensorPin = 67
# create service for Houston
arduino = runtime.createAndStart("arduino","Arduino")
lshoulder = runtime.createAndStart("lshoulder","Servo")
lbicep = runtime.createAndStart("lbicep","Servo")
lelbow = runtime.createAndStart("lelbow","Servo")
rshoulder = runtime.createAndStart("rshoulder","Servo")
rbicep = runtime.createAndStart("rbicep","Servo")
relbow = runtime.createAndStart("relbow","Servo")
# 4 motors
lfmotor = runtime.createAndStart("lfmotor","Motor") # left front
rfmotor = runtime.createAndStart("rfmotor","Motor") # right front
lbmotor = runtime.createAndStart("lbmotor","Motor") # left back
rbmotor = runtime.createAndStart("rbmotor","Motor") # right back
# set config for the services
arduino.setBoard(boardType) # atmega168 | mega2560 | etc
arduino.connect(comPort,57600,8,1,0)
sleep(1) # give it a second for the serial device to get ready
# attach Servos & Motors to arduino
arduino.servoAttach(lshoulder.getName(), 46)
arduino.servoAttach(lbicep.getName(), 47)
arduino.servoAttach(lelbow.getName(), 48)
arduino.servoAttach(rshoulder.getName(), 50)
arduino.servoAttach(rbicep.getName(), 51)
arduino.servoAttach(relbow.getName(), 52)
arduino.motorAttach(lfmotor.getName(), 4, 30)
arduino.motorAttach(rfmotor.getName(), 5, 31)
arduino.motorAttach(lbmotor.getName(), 6, 32)
arduino.motorAttach(rbmotor.getName(), 7, 33)
# update the gui with configuration changes
arduino.publishState()
lshoulder.publishState()
lbicep.publishState()
lelbow.publishState()
rshoulder.publishState()
rbicep.publishState()
relbow.publishState()
lfmotor.publishState()
rfmotor.publishState()
lbmotor.publishState()
rbmotor.publishState()
# system check - need to do checks to see all systems are go !
# start the analog pin sample to display
# in the oscope
arduino.analogReadPollingStart(analogSensorPin)
# change the pinMode of digital pin 13
arduino.pinMode(lfaencoder, Arduino.OUTPUT)
# begin tracing the digital pin 13
arduino.digitalReadPollStart(lfaencoder)
# turn off the trace
# arduino.digitalReadPollStop(lfaencoder)
# turn off the analog sampling
# arduino.analogReadPollingStop(analogSensorPin)
| 33.081395
| 89
| 0.777153
|
# Houston
# Connects a serial device on Windows this would COMx
# Sets the board type
# Then starts polling analog pin 17 which is Analog pin 3
# You will need MRLComm.ino loaded on the Arduino
# If all goes well - you should have 2 traces running
# in the arduino->oscope tab - you can at this point connect
# input - for example a 5v line to the lines and see them change
from time import sleep
from org.myrobotlab.service import Arduino
from org.myrobotlab.service import Servo
from org.myrobotlab.service import Motor
# variables dependent on your setup
boardType = "atmega2560" # atmega168 | atmega328p | atmega2560 | atmega1280 | atmega32u4
#comPort = "/dev/ttyACM0"
#comPort = "COM9"
lfaencoder = 38
analogSensorPin = 67
# create service for Houston
arduino = runtime.createAndStart("arduino","Arduino")
lshoulder = runtime.createAndStart("lshoulder","Servo")
lbicep = runtime.createAndStart("lbicep","Servo")
lelbow = runtime.createAndStart("lelbow","Servo")
rshoulder = runtime.createAndStart("rshoulder","Servo")
rbicep = runtime.createAndStart("rbicep","Servo")
relbow = runtime.createAndStart("relbow","Servo")
# 4 motors
lfmotor = runtime.createAndStart("lfmotor","Motor") # left front
rfmotor = runtime.createAndStart("rfmotor","Motor") # right front
lbmotor = runtime.createAndStart("lbmotor","Motor") # left back
rbmotor = runtime.createAndStart("rbmotor","Motor") # right back
# set config for the services
arduino.setBoard(boardType) # atmega168 | mega2560 | etc
arduino.connect(comPort,57600,8,1,0)
sleep(1) # give it a second for the serial device to get ready
# attach Servos & Motors to arduino
arduino.servoAttach(lshoulder.getName(), 46)
arduino.servoAttach(lbicep.getName(), 47)
arduino.servoAttach(lelbow.getName(), 48)
arduino.servoAttach(rshoulder.getName(), 50)
arduino.servoAttach(rbicep.getName(), 51)
arduino.servoAttach(relbow.getName(), 52)
arduino.motorAttach(lfmotor.getName(), 4, 30)
arduino.motorAttach(rfmotor.getName(), 5, 31)
arduino.motorAttach(lbmotor.getName(), 6, 32)
arduino.motorAttach(rbmotor.getName(), 7, 33)
# update the gui with configuration changes
arduino.publishState()
lshoulder.publishState()
lbicep.publishState()
lelbow.publishState()
rshoulder.publishState()
rbicep.publishState()
relbow.publishState()
lfmotor.publishState()
rfmotor.publishState()
lbmotor.publishState()
rbmotor.publishState()
# system check - need to do checks to see all systems are go !
# start the analog pin sample to display
# in the oscope
arduino.analogReadPollingStart(analogSensorPin)
# change the pinMode of digital pin 13
arduino.pinMode(lfaencoder, Arduino.OUTPUT)
# begin tracing the digital pin 13
arduino.digitalReadPollStart(lfaencoder)
# turn off the trace
# arduino.digitalReadPollStop(lfaencoder)
# turn off the analog sampling
# arduino.analogReadPollingStop(analogSensorPin)
| 0
| 0
| 0
|
b56f79e1e64d0c4ea986e14f2491e8152330eff1
| 4,797
|
py
|
Python
|
server/run.py
|
S-Kantor/CS446
|
7453f0be2c0380a0af0d429d66690d06b659000e
|
[
"Apache-2.0"
] | 2
|
2019-06-19T17:24:51.000Z
|
2020-01-28T02:38:06.000Z
|
server/run.py
|
S-Kantor/CS446
|
7453f0be2c0380a0af0d429d66690d06b659000e
|
[
"Apache-2.0"
] | 4
|
2019-06-19T12:41:30.000Z
|
2019-07-13T03:19:56.000Z
|
server/run.py
|
S-Kantor/CS446
|
7453f0be2c0380a0af0d429d66690d06b659000e
|
[
"Apache-2.0"
] | 1
|
2019-07-28T07:01:11.000Z
|
2019-07-28T07:01:11.000Z
|
from datetime import datetime
from typing import Dict
from flask import Flask, request, send_file
from music import BeatTimestamp, FileOffsetRecording
from room import Room
app = Flask(__name__)
rooms: Dict[str, Room] = {}
# Accepts datetime in milliseconds and converts to microseconds
@app.route("/")
# --------------------------------------------------------
# Rooms
# --------------------------------------------------------
# Adds a new room to the rooms dictionary and returns it's ID
# The room ID is necessary for all future interactions
@app.route("/create-room", methods=['POST'])
# Allows a new user to validate their room ID
@app.route("/<string:room_id>/is-valid-room-id", methods=['POST'])
# --------------------------------------------------------
# Recording
# --------------------------------------------------------
# Informs the server that a user has begun recording
# All users who start recording in a room must stop recording for
# a composition to be produced
@app.route("/<string:room_id>/start-recording", methods=['POST'])
# Informs the server that a user is finished recording and provides
# the FileOffsetRecordings as a json in the following format:
# {
# 'start_time': "%D:%H:%M:%S.%f" (f is milliseconds)
# 'end_time': "%D:%H:%M:%S.%f"
# 'events' : [
# {
# filename: string, -- name of the audio file (uploaded and default)
# time: "%D:%H:%M:%S.%f"
# loopable: bool
# },
# ...
# ]
# }
# Returns whether the recording session is complete
@app.route("/<string:room_id>/stop-recording", methods=['POST'])
# Upload a sound file to current recording session
# Must be in .mp4 format and with the filename that will be used to
# reference the file in the offsets of the FileOffsetRecordings
@app.route("/<string:room_id>/upload-sound", methods=['PUT'])
# --------------------------------------------------------
# Getting Composition
# --------------------------------------------------------
# Returns whether a is recording complete, meaning the same number of users
# who started recording have stopped
# Allows users to poll when they should call get-composition
@app.route("/<string:room_id>/is-recording-complete")
# Returns the generated composition as an mp3 file
# Produces composition if necessary with the FileOffsetRecordings
@app.route("/<string:room_id>/get-composition")
# --------------------------------------------------------
# Misc.
# --------------------------------------------------------
# Simple health check to test connection to server
@app.route("/health-check")
# Can be called as an hourly chron job to clean expired data
@app.route("/cleanup", methods=['POST'])
| 33.78169
| 94
| 0.633729
|
from datetime import datetime
from typing import Dict
from flask import Flask, request, send_file
from music import BeatTimestamp, FileOffsetRecording
from room import Room
app = Flask(__name__)
rooms: Dict[str, Room] = {}
# Accepts datetime in milliseconds and converts to microseconds
def parse_time(time_as_string):
return datetime.strptime(time_as_string + '000', "%j:%H:%M:%S:%f")
@app.route("/")
def hello():
return "Hello World!"
# --------------------------------------------------------
# Rooms
# --------------------------------------------------------
# Adds a new room to the rooms dictionary and returns it's ID
# The room ID is necessary for all future interactions
@app.route("/create-room", methods=['POST'])
def create_room():
room_id = Room.gen_room_id(rooms.keys())
new_room = Room(room_id)
rooms[new_room.id] = new_room
app.logger.debug('room created: %s', str(new_room))
return str(new_room.id)
# Allows a new user to validate their room ID
@app.route("/<string:room_id>/is-valid-room-id", methods=['POST'])
def is_valid_room_id(room_id):
app.logger.debug('validating room: %s', room_id)
return str(room_id in rooms)
# --------------------------------------------------------
# Recording
# --------------------------------------------------------
# Informs the server that a user has begun recording
# All users who start recording in a room must stop recording for
# a composition to be produced
@app.route("/<string:room_id>/start-recording", methods=['POST'])
def start_recording(room_id):
app.logger.debug('a user started recording: %s', room_id)
return str(rooms[room_id].start_recording())
# Informs the server that a user is finished recording and provides
# the FileOffsetRecordings as a json in the following format:
# {
# 'start_time': "%D:%H:%M:%S.%f" (f is milliseconds)
# 'end_time': "%D:%H:%M:%S.%f"
# 'events' : [
# {
# filename: string, -- name of the audio file (uploaded and default)
# time: "%D:%H:%M:%S.%f"
# loopable: bool
# },
# ...
# ]
# }
# Returns whether the recording session is complete
@app.route("/<string:room_id>/stop-recording", methods=['POST'])
def stop_recording(room_id):
app.logger.debug('a user stopped recording: %s', room_id)
json = request.get_json()
app.logger.debug(json)
timestamps = [BeatTimestamp(bool(e['loopable']), parse_time(e['dateTime']), e['fileName'])
for e in json['recordingEntries']]
offset_recording = FileOffsetRecording(
parse_time(json['startTime']),
parse_time(json['endTime']),
timestamps)
complete = rooms[room_id].stop_recording(offset_recording)
app.logger.debug('last user: %s', complete)
return str(complete)
# Upload a sound file to current recording session
# Must be in .mp4 format and with the filename that will be used to
# reference the file in the offsets of the FileOffsetRecordings
@app.route("/<string:room_id>/upload-sound", methods=['PUT'])
def upload_sound(room_id):
    filename = next(iter(request.files))  # name of the first uploaded field
app.logger.debug('new file %s uploaded to room %s', filename, room_id)
file = request.files[filename]
return str(rooms[room_id].add_new_sound(filename, file))
# --------------------------------------------------------
# Getting Composition
# --------------------------------------------------------
# Returns whether a is recording complete, meaning the same number of users
# who started recording have stopped
# Allows users to poll when they should call get-composition
@app.route("/<string:room_id>/is-recording-complete")
def is_recording_complete(room_id):
app.logger.debug('checking whether recording is complete for room %s', room_id)
return str(rooms[room_id].is_recording_complete())
# Returns the generated composition as an mp3 file
# Produces composition if necessary with the FileOffsetRecordings
@app.route("/<string:room_id>/get-composition")
def get_composition(room_id):
app.logger.debug('getting composition for room %s', room_id)
file = rooms[room_id].get_composition_as_mp3()
return send_file(file, mimetype="audio/mpeg")
# --------------------------------------------------------
# Misc.
# --------------------------------------------------------
# Simple health check to test connection to server
@app.route("/health-check")
def health_check():
return "I'm Alive!"
# Can be called as an hourly cron job to clean expired data
@app.route("/cleanup", methods=['POST'])
def cleanup_rooms():
app.logger.debug('server cleaning up')
    # rooms.items() is required here; iterating the dict directly yields only keys
    expired_rooms = [room_id for room_id, room in rooms.items() if room.is_expired()]
    for key in expired_rooms:
        app.logger.debug('%s has expired', str(key))
        del rooms[key]
    # Flask views must return a response; report how many rooms were removed
    return str(len(expired_rooms))
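# Hypothetical usage sketch (not part of the original file): an hourly cron
# job could trigger the cleanup endpoint with a plain POST; base_url is a
# placeholder for the deployment address.
def _example_cleanup_cron(base_url):
    import requests  # assumed to be available in the cron environment
    return requests.post("{}/cleanup".format(base_url))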
| 1,840
| 0
| 242
|
d343e0f5bb0c664e34d052bfc81a5955774763f5
| 295
|
py
|
Python
|
src/scs_dfe/interface/__init__.py
|
south-coast-science/scs_dfe_eng
|
05708b27ba65438d11fad0d947bcff3df37dc87d
|
[
"MIT"
] | null | null | null |
src/scs_dfe/interface/__init__.py
|
south-coast-science/scs_dfe_eng
|
05708b27ba65438d11fad0d947bcff3df37dc87d
|
[
"MIT"
] | null | null | null |
src/scs_dfe/interface/__init__.py
|
south-coast-science/scs_dfe_eng
|
05708b27ba65438d11fad0d947bcff3df37dc87d
|
[
"MIT"
] | 2
|
2017-12-05T12:41:48.000Z
|
2019-09-29T14:41:30.000Z
|
"""
Created on 16 Apr 2017
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""
from scs_dfe.interface.component.pca8574 import PCA8574State
# --------------------------------------------------------------------------------------------------------------------
PCA8574State.init()
| 22.692308
| 118
| 0.461017
|
"""
Created on 16 Apr 2017
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""
from scs_dfe.interface.component.pca8574 import PCA8574State
# --------------------------------------------------------------------------------------------------------------------
PCA8574State.init()
| 0
| 0
| 0
|
750985c33cdae87f7c5dfdfc9bedf623dce189ed
| 28,548
|
py
|
Python
|
basin_setup/delineate.py
|
USDA-ARS-NWRC/basin_setup
|
8a9f661ab7897d63dda6c470cfd24f498e6e6183
|
[
"CC0-1.0"
] | 1
|
2020-10-15T00:53:46.000Z
|
2020-10-15T00:53:46.000Z
|
basin_setup/delineate.py
|
USDA-ARS-NWRC/basin_setup
|
8a9f661ab7897d63dda6c470cfd24f498e6e6183
|
[
"CC0-1.0"
] | 58
|
2017-12-13T23:37:12.000Z
|
2021-07-26T19:58:05.000Z
|
basin_setup/delineate.py
|
USDA-ARS-NWRC/basin_setup
|
8a9f661ab7897d63dda6c470cfd24f498e6e6183
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import datetime
import os
import shutil
import sys
import time
from subprocess import check_output
import geopandas as gpd
import numpy as np
from colorama import Fore, Style, init
from basin_setup import __version__
# Initialize colors
init()
DEBUG = False
out = Messages()
def check_path(filename, outfile=False):
"""
    Checks whether a provided file exists.
    If outfile is true then we assume we are making a file and therefore
    should only check if the directory exists.
Args:
filename: path to a file
outfile: Boolean indicating whether to check for a file (outfile=False)
or a directory (outfile==True)
"""
folder = os.path.dirname(filename)
if outfile and not os.path.isdir(folder):
out.error("Directory provided for output location does not exist!"
"\nMissing----->{}".format(filename))
sys.exit()
if not outfile and not os.path.isfile(filename):
out.error("Input file does not exist!\nMissing----->{}"
"".format(filename))
sys.exit()
def run_cmd(cmd, nthreads=None):
"""
Executes the command and pipes the output to the console.
Args:
        cmd: String command to be entered in the command prompt
"""
out.dbg('Running {}'.format(cmd))
if nthreads is not None:
cmd = 'mpiexec -n {0} '.format(nthreads) + cmd
s = check_output(cmd, shell=True, universal_newlines=True)
out.dbg(s)
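# Hypothetical example (not part of the original file): when nthreads is set,
# run_cmd() prepends an MPI launcher, so the call below would execute
# "mpiexec -n 4 pitremove -z dem.tif -fel filled.tif". File names are
# placeholders.
#
#     run_cmd("pitremove -z dem.tif -fel filled.tif", nthreads=4)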
def pitremove(demfile, outfile=None, nthreads=None):
"""
STEP #1
Builds the command to pit fill the DEM and executes it.
Args:
demfile: Path to tif of the DEM.
outfile: Path to write the pit filled DEM.
nthreads: Number of cores to use for mpiexec
"""
out.msg("Removing Pits from DEM...")
if outfile is None:
outfile = 'filled.tif'
check_path(demfile)
check_path(outfile, outfile=True)
CMD = "pitremove -z {0} -fel {1}".format(demfile, outfile)
run_cmd(CMD, nthreads=nthreads)
def calcD8Flow(filled_dem, d8dir_file=None, d8slope_file=None, nthreads=None):
"""
STEP #2
Builds the command to calculate the D8 flow for the flow direction and
executes it.
Args:
filled_dem: Path to tif of the pit filled DEM.
d8dir_file: Path to write the D8 flow direction.
d8slope_file: Path to write the D8 flow slope.
nthreads: Number of cores to use for mpiexec
"""
out.msg("Calculating D8 flow direction...")
# Check paths
check_path(filled_dem)
check_path(d8dir_file, outfile=True)
check_path(d8slope_file, outfile=True)
CMD = "d8flowdir -fel {0} -p {1} -sd8 {2}".format(filled_dem,
d8dir_file,
d8slope_file)
run_cmd(CMD, nthreads=nthreads)
def calcD8DrainageArea(d8flowdir, areaD8_out=None, nthreads=None):
"""
STEP #3
Calculates D8 Contributing area to each cell in the DEM.
Args:
d8flowdir: Path to the D8 Flow direction image
areaD8_out: Path to output the Drainage area image
nthreads: Number of cores to use for mpiexec
"""
check_path(d8flowdir)
check_path(areaD8_out, outfile=True)
CMD = "aread8 -p {0} -ad8 {1}".format(d8flowdir, areaD8_out)
run_cmd(CMD, nthreads=nthreads)
def defineStreamsByThreshold(areaD8, threshold_streams_out=None, threshold=100,
nthreads=None):
"""
STEP #4
Stream definition by threshold in order to extract a first version of the
stream network
Args:
areaD8: Path to the D8 Drainage area image
threshold_streams_out: Path to output the thresholded image
threshold: threshold value to recategorize the data
nthreads: Number of cores to use for mpiexec
"""
out.msg(
"Performing stream estimation using threshold of {0}".format(
threshold))
check_path(areaD8)
check_path(threshold_streams_out, outfile=True)
CMD = "threshold -ssa {0} -src {1} -thresh {2}".format(
areaD8,
threshold_streams_out,
threshold)
run_cmd(CMD, nthreads=nthreads)
def outlets_2_streams(d8flowdir, threshold_streams, pour_points,
new_pour_points=None,
nthreads=None):
"""
STEP #5 Move Outlets to Streams, so as to move the catchment outlet point
on one of the DEM cells identified by TauDEM as belonging to the
stream network
Args:
d8flowdir: Path to the D8 Flow direction image
threshold_streams: Path to output the thresholded stream image
pour_points: Path to pour point locations in a list
new_pour_points: Path to output the new list of points
nthreads: Number of cores to use for mpiexec
"""
check_path(d8flowdir)
check_path(threshold_streams)
check_path(pour_points)
check_path(new_pour_points, outfile=True)
CMD = 'moveoutletstostrm -p {0} -src {1} -o {2} -om {3}'.format(
d8flowdir,
threshold_streams,
pour_points,
new_pour_points)
run_cmd(CMD, nthreads=nthreads)
def calcD8DrainageAreaBasin(d8flowdir, basin_outlets_moved, areaD8_out=None,
nthreads=None):
"""
STEP #6
D8 Contributing Area again, but with the catchment outlet point as
additional input data
Args:
d8flowdir: Path to the D8 Flow direction image
basin_outlets_moved: all pour points that have been moved to the stream
        areaD8_out: Path to output the Drainage area image that utilizes all
                    the points
nthreads: Number of cores to use for mpiexec
"""
out.msg("Calculating drainage area using pour points...")
check_path(d8flowdir)
check_path(basin_outlets_moved)
check_path(areaD8_out, outfile=True)
CMD = 'aread8 -p {0} -o {1} -ad8 {2}'.format(d8flowdir,
basin_outlets_moved,
areaD8_out)
run_cmd(CMD, nthreads=nthreads)
def delineate_streams(dem, d8flowdir, basin_drain_area, threshold_streams,
basin_outlets_moved, stream_orderfile=None,
treefile=None, coordfile=None, netfile=None,
wfile=None, nthreads=None):
"""
STEP #8 Stream Reach And Watershed
Args:
dem: path to a filled dem image
d8flowdir: path to the flow direction image
basin_drain_area: path to the flow accumulation image for the basin
        threshold_streams: stream definition image defined by a threshold
basin_outlets_moved: Path to a .bna of the pour points corrected to be
on the streams.
stream_orderfile: Name of the file to output the stream segment order
treefile: Name of the file to output the subbasin flow order.
coordfile: Not sure what this file is
netfile: Name of the images to output the stream definitions.
wfile: Name of the image to output subbasin definitions.
nthreads: Number of cores to use for mpiexec
"""
out.msg("Creating watersheds and stream files...")
# Check path validity
inputs = [dem, d8flowdir, basin_drain_area, threshold_streams,
basin_outlets_moved]
outputs = [stream_orderfile, treefile, coordfile, netfile, wfile]
for f in inputs:
check_path(f)
for f in outputs:
check_path(f, outfile=True)
CMD = ('streamnet -fel {0} -p {1} -ad8 {2} -src {3} -ord {4} -tree {5}'
' -coord {6} -net {7} -o {8} -w {9}').format(
dem,
d8flowdir,
basin_drain_area,
threshold_streams,
stream_orderfile,
treefile,
coordfile,
netfile,
basin_outlets_moved,
wfile)
run_cmd(CMD, nthreads=nthreads)
def convert2ascii(infile, outfile=None):
"""
Convert to ascii
"""
check_path(infile)
check_path(outfile, outfile=True)
# convert wfile files to ascii
CMD = 'gdal_translate -of AAIGrid {0} {1}'.format(infile, outfile)
run_cmd(CMD)
def produce_shapefiles(watershed_tif, corrected_points,
output_dir=None, streamflow=False):
"""
    Outputs the polygons of the individual subbasins to a shapefile.
Args:
watershed_tif: Path to a geotiff of the watersheds
corrected_points: Path to the corrected points used for delineation
output_dir: Output location used for producing shapefiles
"""
# Check files
check_path(watershed_tif)
check_path(corrected_points)
wfname = os.path.basename(watershed_tif).split('.')[0] + '.shp'
    # gdal_polygonize converts the watershed raster into a polygon shapefile
watershed_shp = os.path.join(output_dir, wfname)
CMD = 'gdal_polygonize.py -f "ESRI SHAPEFILE" {} {}'.format(watershed_tif,
watershed_shp)
run_cmd(CMD)
# Read in and identify the names of the pour points with the subbasins
ptdf = gpd.read_file(corrected_points)
wdf = gpd.read_file(watershed_shp)
# Identify the name and output the individual basins
for nm, pt in zip(ptdf['Primary ID'].values, ptdf['geometry'].values):
for pol, idx in zip(wdf['geometry'].values, wdf.index):
if pt.within(pol):
# Create a new dataframe and output it
df = gpd.GeoDataFrame(columns=wdf.columns, crs=wdf.crs)
df = df.append(wdf.loc[idx])
out.msg("Creating the subbasin outline for {}...".format(nm))
df.to_file(os.path.join(output_dir, '{}_subbasin.shp'
''.format(
(nm.lower()).replace(' ', '_'))
))
# Output the full basin outline
out.msg("Creating the entire basin outline...")
same = np.ones(len(wdf.index))
wdf['all'] = same
basin_outline = wdf.dissolve(by='all')
basin_outline.to_file(os.path.join(output_dir, 'basin_outline.shp'))
return watershed_shp
def create_readme(sysargs, output_dir):
"""
Creates a readme with all the details for creating the files
Args:
sysargs: command used for generating files
"""
dt = ((datetime.datetime.today()).isoformat()).split('T')[0]
out_str = (
"###################################################################\n"
"# BASIN DELINEATION TOOL V{0}\n"
"###################################################################\n"
"\n The files in this folder were generated on {1}.\n"
"This was accomplished using the following command:\n"
"\n$ {2}\n"
"\nTo get access to the source code please visit:\n"
"https://github.com/USDA-ARS-NWRC/basin_setup")
out_str = out_str.format(__version__, dt, ' '.join(sys.argv))
with open(os.path.join(output_dir, 'README.txt'), 'w') as fp:
fp.write(out_str)
fp.close()
def cleanup(output_dir, at_start=False):
"""
Removes the temp folder and removes the following files:
* output/watersheds.shp
* output/*_subbasin.shp
* output/basin_outline.shp
* output/corrected_points.shp
Args:
        output_dir: folder to look in for cleanup
        at_start: If True, clean up more files than at the end of a run.
"""
out.msg("Cleaning up files...")
# Always cleanup the temp folder
temp = os.path.join(output_dir, 'temp')
if os.path.isdir(temp):
shutil.rmtree(temp)
if at_start:
# Remove any potential streamflow folders
streamflow = os.path.join(output_dir, 'streamflow')
if os.path.isdir(streamflow):
shutil.rmtree(streamflow)
fnames = os.listdir(output_dir)
for f in fnames:
fn = os.path.join(output_dir, f)
if ("_subbasin." in f or "thresh" in f or "basin_outline." in f
or 'watersheds_' in f or 'out.' in f
or "corrected_points_" in f):
out.dbg("Removing {}".format(f))
os.remove(fn)
def confirm_norerun(non_thresholdkeys, imgs):
"""
Checks if the non-thresholded files exist, if so confirm the user wants
to overwrite them.
Args:
        non_thresholdkeys: keys to check in the imgs dictionary of paths
imgs: Dictionary of paths to images
Returns
bool: Indicating whether we continue or not
"""
out.dbg("Checking if important delineation images pre-exist...")
    # Quickly check if the user wants to overwrite a possible rerun
move_forward = False
any_file_exists = False
for f in non_thresholdkeys:
if os.path.isfile(imgs[f]):
out.dbg("{} image exists!".format(f))
any_file_exists = True
out.warn("You are about to overwrite the delineation files that"
" take the longest to make. \n\nAre you sure you want to"
" do this? (y/n)\n")
answer = input()
acceptable_answer = False
while not acceptable_answer:
if answer.lower() == 'y':
acceptable_answer = True
move_forward = True
elif answer.lower() == 'n':
acceptable_answer = True
else:
acceptable_answer = False
break
# If there weren't any files then move ahead
if not any_file_exists:
move_forward = True
out.dbg("No pre-existing files, moving forward...")
return move_forward
def create_ars_streamflow_files(treefile, coordfile, threshold, wshp, netdir,
output='basin_catchments.csv'):
"""
Takes in the Tree file and the Coordinates file to produce a csv of the
downstream catchment, the elevation of a catchment, and contributing area
"""
today = (datetime.datetime.today().date()).isoformat()
header = ("#############################################################\n"
" Basin Catchment File for USDA-ARS-NWRC Streamflow modeling. \n"
" Delineatation Threshold: {}\n"
" Date Created: {}\n"
" Created using basin_setup v{}\n"
"#############################################################\n"
"\n".format(threshold, today,
__version__)
)
with open(output, 'w+') as fp:
fp.write(header)
fp.close()
# tree_names = ['link', 'start number', 'end number', 'downstream',
# 'upstream',
# 'strahler',
# 'monitor point',
# 'network magnitude']
# coord_names = ['dummy', 'x', 'y', 'distance', 'elevation', 'area']
# dftree = pd.read_csv(treefile, delimiter='\t', names=tree_names)
# dfcoord = pd.read_csv(coordfile, delimiter='\t', names=coord_names)
dfwshp = gpd.read_file(wshp)
    # Get the network shapefile which lives under a folder named after the
# tif.
name = os.path.split(netdir)[-1].split('.')[0] + '.shp'
netshp = os.path.join(netdir, name)
dfnet = gpd.read_file(netshp)
dfnet = dfnet.set_index('WSNO')
# Collect the area of each basin
dfwshp['area'] = dfwshp.area
# handle individual cells acting as subbasins
dfwshp = dfwshp.groupby('DN').sum()
# Collect down stream info.
dfwshp['downstream'] = dfnet['DSLINKNO']
dfwshp.to_csv(output, mode='a')
def output_streamflow(imgs, threshold, wshp, temp="temp",
output_dir='streamflow'):
"""
Outputs files necessary for streamflow modeling. This will create a file
structure under a folder defined by output_dir and the threshold.
E.g. streamflow/thresh_10000000
Args:
        imgs: Dictionary containing the files to be output.
threshold: threshold used for creating subbasins
wshp: Watershed shapefile
output_dir: Location to output files
"""
# Dictionary to grab filenames for ARS streamflow
dat = {}
out.msg("Creating streamflow files...")
final_output = os.path.join(output_dir, "thresh_{}".format(threshold))
if not os.path.isdir(output_dir):
out.msg("Making streamflow directory")
os.mkdir(output_dir)
if not os.path.isdir(final_output):
out.msg("Making streamflow threshold directory...")
os.mkdir(final_output)
# Convert the watersheds to ascii and move files to streamflow folder for
# SLF streamflow
for k in ['corrected_points', 'watersheds', 'coord', 'tree']:
name = os.path.basename(imgs[k])
outfile = os.path.join(final_output, k + "." + name.split('.')[-1])
        # Handle grabbing data for outputting ARS streamflow
if k in ['tree', 'coord']:
dat[k] = outfile
if k == 'watersheds':
outfile = os.path.join(final_output, k + '.asc')
convert2ascii(imgs[k], outfile)
else:
shutil.copy(imgs[k], outfile)
# Copy over threshold files
for f in os.listdir(imgs['net']):
to_f = os.path.join(final_output, os.path.basename(f))
shutil.copy(os.path.join(imgs["net"], f), to_f)
# Create the files for ARS Streamflow
create_ars_streamflow_files(dat['tree'],
dat['coord'],
threshold,
wshp,
imgs['net'],
output=os.path.join(final_output,
'basin_catchments.csv'))
def ernestafy(demfile, pour_points, output=None, temp=None, threshold=100,
rerun=False,
nthreads=None,
out_streams=False):
"""
Run TauDEM using the script Ernesto Made.... therefore we will
ernestafy this basin.
Args:
demfile: Original DEM tif.
pour_points: Locations of the pour_points in a .bna file format
output: Output folder location, default is ./delineation
threshold: Threshold to use, can be a list or a single value
rerun: boolean indicating whether to avoid re-doing steps 1-3
out_streams: Boolean determining whether to output the files for
streamflow modeling
"""
create_readme(sys.argv, output)
# Output File keys without a threshold in the filename
non_thresholdkeys = ['filled', 'flow_dir', 'slope', 'drain_area',
'basin_drain_area']
# Output File keys WITH a threshold in the filename
thresholdkeys = ['thresh_streams', 'thresh_basin_streams', 'order', 'tree',
'coord', 'net', 'watersheds', 'basin_outline',
'corrected_points']
filekeys = non_thresholdkeys + thresholdkeys
# Create file paths for the output file management
imgs = {}
for k in filekeys:
base = os.path.join(output, k)
# Add the threshold to the filename if need be
if k in thresholdkeys:
base = os.path.join(temp, k)
base += '_thresh_{}'.format(threshold)
        # Watch out for shapefiles
if 'points' in k:
imgs[k] = base + '.shp'
# Files we need for streamflow
elif k in ['coord', 'tree']:
imgs[k] = base + '.dat'
else:
imgs[k] = base + '.tif'
# This file if it already exists causes problems
if os.path.isfile(imgs['net']):
out.msg("Removing pre-existing stream network file...")
os.remove(imgs['net'])
# If we rerun we don't want to run steps 1-3 again
if rerun:
out.warn("Performing a rerun, assuming files for flow direction and"
" accumulation exist...")
else:
move_forward = confirm_norerun(non_thresholdkeys, imgs)
if move_forward:
# 1. Pit Remove in order to fill the pits in the DEM
pitremove(demfile, outfile=imgs['filled'], nthreads=nthreads)
# 2. D8 Flow Directions in order to compute the flow direction in
# each DEM cell
calcD8Flow(imgs['filled'], d8dir_file=imgs['flow_dir'],
d8slope_file=imgs['slope'],
nthreads=nthreads)
# 3. D8 Contributing Area so as to compute the drainage area in
# each DEM cell
calcD8DrainageArea(imgs['flow_dir'], areaD8_out=imgs['drain_area'],
nthreads=nthreads)
else:
out.msg("Please use the '--rerun' flag to perform a rerun.\n")
sys.exit()
##########################################################################
# This section and below gets run every call. (STEPS 4-8)
##########################################################################
# 4. Stream Definition by Threshold, in order to extract a first version of
# the stream network
defineStreamsByThreshold(imgs['drain_area'],
threshold_streams_out=imgs['thresh_streams'],
threshold=threshold, nthreads=nthreads)
# 5. Move Outlets to Streams, so as to move the catchment outlet point on
# one of the DEM cells identified by TauDEM as belonging to the stream
# network
outlets_2_streams(imgs['flow_dir'], imgs['thresh_streams'], pour_points,
new_pour_points=imgs['corrected_points'],
nthreads=nthreads)
# 6. D8 Contributing Area again, but with the catchment outlet point as
# additional input data
calcD8DrainageAreaBasin(imgs['flow_dir'], imgs['corrected_points'],
areaD8_out=imgs['basin_drain_area'],
nthreads=nthreads)
# 7. Stream Definition by Threshold again, but with the catchment outlet
# point as additional input data
defineStreamsByThreshold(imgs['basin_drain_area'],
threshold_streams_out=imgs['thresh_basin_streams'], # noqa
threshold=threshold,
nthreads=nthreads)
# 8. Stream Reach And Watershed
delineate_streams(demfile, imgs['flow_dir'], imgs['basin_drain_area'],
imgs['thresh_basin_streams'], imgs['corrected_points'],
stream_orderfile=imgs['order'], treefile=imgs['tree'],
coordfile=imgs['coord'], netfile=imgs['net'],
wfile=imgs['watersheds'], nthreads=nthreads)
# Output the shapefiles of the watershed
wshp = produce_shapefiles(imgs['watersheds'], imgs['corrected_points'],
output_dir=output)
if out_streams:
output_streamflow(imgs, threshold, wshp, temp=temp,
output_dir=os.path.join(output, 'streamflow'))
if __name__ == '__main__':
main()
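# Hypothetical driver sketch (not part of the original file): ernestafy() can
# also be called directly for a single threshold, mirroring what the
# command-line entry point does. All paths below are placeholders.
#
#     ernestafy("dem.tif", "pour_points.bna",
#               output="./delineation", temp="./delineation/temp",
#               threshold=100, rerun=False, nthreads=4, out_streams=False)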
| 34.271309
| 88
| 0.585855
|
#!/usr/bin/env python3
import argparse
import datetime
import os
import shutil
import sys
import time
from subprocess import check_output
import geopandas as gpd
import numpy as np
from colorama import Fore, Style, init
from basin_setup import __version__
# Initialize colors
init()
DEBUG = False
class Messages():
def __init__(self):
self.context = {'warning': Fore.YELLOW,
'error': Fore.RED,
'ok': Fore.GREEN,
'normal': Style.NORMAL + Fore.WHITE,
'header': Style.BRIGHT}
def build_msg(self, str_msg, context_str=None):
"""
Constructs the desired strings for color and Style
        Args:
str_msg: String the user wants to output
context_str: type of print style and color, key associated with
self.context
Returns:
final_msg: Str containing the desired colors and styles
"""
if context_str is None:
context_str = 'normal'
if context_str in self.context.keys():
if isinstance(str_msg, list):
str_msg = ', '.join([str(s) for s in str_msg])
final_msg = self.context[context_str] + str_msg + Style.RESET_ALL
else:
raise ValueError("Not a valid context")
return final_msg
def _structure_msg(self, a_msg):
if isinstance(a_msg, list):
a_msg = ', '.join([str(s) for s in a_msg])
if not isinstance(a_msg, str):
a_msg = str(a_msg)
return a_msg
def msg(self, str_msg, context_str=None):
final_msg = self.build_msg(str_msg, context_str)
print('\n' + final_msg)
def dbg(self, str_msg, context_str=None):
"""
Messages designed for debugging set by a global variable DEBUG
"""
if DEBUG:
final_msg = self.build_msg('[DEBUG]: ', 'header')
final_msg += self._structure_msg(str_msg)
final_msg = self.build_msg(final_msg, context_str)
print('\n' + final_msg)
def warn(self, str_msg):
final_msg = self.build_msg('[WARNING]: ', 'header')
final_msg = self.build_msg(final_msg + str_msg, 'warning')
print('\n' + final_msg)
def error(self, str_msg):
final_msg = self.build_msg('[ERROR]: ', 'header')
final_msg = self.build_msg(final_msg + str_msg, 'error')
print('\n' + final_msg)
def respond(self, str_msg):
"""
Messages acting like a confirmation to the user and in response to the
previous message
"""
final_msg = self.build_msg(str_msg, 'ok')
print('\t' + final_msg)
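# Hypothetical usage sketch (not part of the original file): callers pick a
# colorama context by name, e.g.
#
#     out = Messages()
#     out.msg("Basin Delineation Tool", 'header')
#     out.warn("no threshold supplied, using the default")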
out = Messages()
def check_path(filename, outfile=False):
"""
    Checks whether a provided file exists.
    If outfile is true then we assume we are making a file and therefore
    should only check if the directory exists.
Args:
filename: path to a file
outfile: Boolean indicating whether to check for a file (outfile=False)
or a directory (outfile==True)
"""
folder = os.path.dirname(filename)
if outfile and not os.path.isdir(folder):
out.error("Directory provided for output location does not exist!"
"\nMissing----->{}".format(filename))
sys.exit()
if not outfile and not os.path.isfile(filename):
out.error("Input file does not exist!\nMissing----->{}"
"".format(filename))
sys.exit()
def run_cmd(cmd, nthreads=None):
"""
Executes the command and pipes the output to the console.
Args:
        cmd: String command to be entered in the command prompt
"""
out.dbg('Running {}'.format(cmd))
if nthreads is not None:
cmd = 'mpiexec -n {0} '.format(nthreads) + cmd
s = check_output(cmd, shell=True, universal_newlines=True)
out.dbg(s)
def pitremove(demfile, outfile=None, nthreads=None):
"""
STEP #1
Builds the command to pit fill the DEM and executes it.
Args:
demfile: Path to tif of the DEM.
outfile: Path to write the pit filled DEM.
nthreads: Number of cores to use for mpiexec
"""
out.msg("Removing Pits from DEM...")
if outfile is None:
outfile = 'filled.tif'
check_path(demfile)
check_path(outfile, outfile=True)
CMD = "pitremove -z {0} -fel {1}".format(demfile, outfile)
run_cmd(CMD, nthreads=nthreads)
def calcD8Flow(filled_dem, d8dir_file=None, d8slope_file=None, nthreads=None):
"""
STEP #2
Builds the command to calculate the D8 flow for the flow direction and
executes it.
Args:
filled_dem: Path to tif of the pit filled DEM.
d8dir_file: Path to write the D8 flow direction.
d8slope_file: Path to write the D8 flow slope.
nthreads: Number of cores to use for mpiexec
"""
out.msg("Calculating D8 flow direction...")
# Check paths
check_path(filled_dem)
check_path(d8dir_file, outfile=True)
check_path(d8slope_file, outfile=True)
CMD = "d8flowdir -fel {0} -p {1} -sd8 {2}".format(filled_dem,
d8dir_file,
d8slope_file)
run_cmd(CMD, nthreads=nthreads)
def calcD8DrainageArea(d8flowdir, areaD8_out=None, nthreads=None):
"""
STEP #3
Calculates D8 Contributing area to each cell in the DEM.
Args:
d8flowdir: Path to the D8 Flow direction image
areaD8_out: Path to output the Drainage area image
nthreads: Number of cores to use for mpiexec
"""
check_path(d8flowdir)
check_path(areaD8_out, outfile=True)
CMD = "aread8 -p {0} -ad8 {1}".format(d8flowdir, areaD8_out)
run_cmd(CMD, nthreads=nthreads)
def defineStreamsByThreshold(areaD8, threshold_streams_out=None, threshold=100,
nthreads=None):
"""
STEP #4
Stream definition by threshold in order to extract a first version of the
stream network
Args:
areaD8: Path to the D8 Drainage area image
threshold_streams_out: Path to output the thresholded image
threshold: threshold value to recategorize the data
nthreads: Number of cores to use for mpiexec
"""
out.msg(
"Performing stream estimation using threshold of {0}".format(
threshold))
check_path(areaD8)
check_path(threshold_streams_out, outfile=True)
CMD = "threshold -ssa {0} -src {1} -thresh {2}".format(
areaD8,
threshold_streams_out,
threshold)
run_cmd(CMD, nthreads=nthreads)
def outlets_2_streams(d8flowdir, threshold_streams, pour_points,
new_pour_points=None,
nthreads=None):
"""
STEP #5 Move Outlets to Streams, so as to move the catchment outlet point
on one of the DEM cells identified by TauDEM as belonging to the
stream network
Args:
d8flowdir: Path to the D8 Flow direction image
threshold_streams: Path to output the thresholded stream image
pour_points: Path to pour point locations in a list
new_pour_points: Path to output the new list of points
nthreads: Number of cores to use for mpiexec
"""
check_path(d8flowdir)
check_path(threshold_streams)
check_path(pour_points)
check_path(new_pour_points, outfile=True)
CMD = 'moveoutletstostrm -p {0} -src {1} -o {2} -om {3}'.format(
d8flowdir,
threshold_streams,
pour_points,
new_pour_points)
run_cmd(CMD, nthreads=nthreads)
def calcD8DrainageAreaBasin(d8flowdir, basin_outlets_moved, areaD8_out=None,
nthreads=None):
"""
STEP #6
D8 Contributing Area again, but with the catchment outlet point as
additional input data
Args:
d8flowdir: Path to the D8 Flow direction image
basin_outlets_moved: all pour points that have been moved to the stream
        areaD8_out: Path to output the Drainage area image that utilizes all
                    the points
nthreads: Number of cores to use for mpiexec
"""
out.msg("Calculating drainage area using pour points...")
check_path(d8flowdir)
check_path(basin_outlets_moved)
check_path(areaD8_out, outfile=True)
CMD = 'aread8 -p {0} -o {1} -ad8 {2}'.format(d8flowdir,
basin_outlets_moved,
areaD8_out)
run_cmd(CMD, nthreads=nthreads)
def delineate_streams(dem, d8flowdir, basin_drain_area, threshold_streams,
basin_outlets_moved, stream_orderfile=None,
treefile=None, coordfile=None, netfile=None,
wfile=None, nthreads=None):
"""
STEP #8 Stream Reach And Watershed
Args:
dem: path to a filled dem image
d8flowdir: path to the flow direction image
basin_drain_area: path to the flow accumulation image for the basin
        threshold_streams: stream definition image defined by a threshold
basin_outlets_moved: Path to a .bna of the pour points corrected to be
on the streams.
stream_orderfile: Name of the file to output the stream segment order
treefile: Name of the file to output the subbasin flow order.
coordfile: Not sure what this file is
netfile: Name of the images to output the stream definitions.
wfile: Name of the image to output subbasin definitions.
nthreads: Number of cores to use for mpiexec
"""
out.msg("Creating watersheds and stream files...")
# Check path validity
inputs = [dem, d8flowdir, basin_drain_area, threshold_streams,
basin_outlets_moved]
outputs = [stream_orderfile, treefile, coordfile, netfile, wfile]
for f in inputs:
check_path(f)
for f in outputs:
check_path(f, outfile=True)
CMD = ('streamnet -fel {0} -p {1} -ad8 {2} -src {3} -ord {4} -tree {5}'
' -coord {6} -net {7} -o {8} -w {9}').format(
dem,
d8flowdir,
basin_drain_area,
threshold_streams,
stream_orderfile,
treefile,
coordfile,
netfile,
basin_outlets_moved,
wfile)
run_cmd(CMD, nthreads=nthreads)
def convert2ascii(infile, outfile=None):
"""
Convert to ascii
"""
check_path(infile)
check_path(outfile, outfile=True)
# convert wfile files to ascii
CMD = 'gdal_translate -of AAIGrid {0} {1}'.format(infile, outfile)
run_cmd(CMD)
def produce_shapefiles(watershed_tif, corrected_points,
output_dir=None, streamflow=False):
"""
    Outputs the polygons of the individual subbasins to a shapefile.
Args:
watershed_tif: Path to a geotiff of the watersheds
corrected_points: Path to the corrected points used for delineation
output_dir: Output location used for producing shapefiles
"""
# Check files
check_path(watershed_tif)
check_path(corrected_points)
wfname = os.path.basename(watershed_tif).split('.')[0] + '.shp'
    # gdal_polygonize converts the watershed raster into a polygon shapefile
watershed_shp = os.path.join(output_dir, wfname)
CMD = 'gdal_polygonize.py -f "ESRI SHAPEFILE" {} {}'.format(watershed_tif,
watershed_shp)
run_cmd(CMD)
# Read in and identify the names of the pour points with the subbasins
ptdf = gpd.read_file(corrected_points)
wdf = gpd.read_file(watershed_shp)
# Identify the name and output the individual basins
for nm, pt in zip(ptdf['Primary ID'].values, ptdf['geometry'].values):
for pol, idx in zip(wdf['geometry'].values, wdf.index):
if pt.within(pol):
# Create a new dataframe and output it
df = gpd.GeoDataFrame(columns=wdf.columns, crs=wdf.crs)
df = df.append(wdf.loc[idx])
out.msg("Creating the subbasin outline for {}...".format(nm))
df.to_file(os.path.join(output_dir, '{}_subbasin.shp'
''.format(
(nm.lower()).replace(' ', '_'))
))
# Output the full basin outline
out.msg("Creating the entire basin outline...")
same = np.ones(len(wdf.index))
wdf['all'] = same
basin_outline = wdf.dissolve(by='all')
basin_outline.to_file(os.path.join(output_dir, 'basin_outline.shp'))
return watershed_shp
def create_readme(sysargs, output_dir):
"""
Creates a readme with all the details for creating the files
Args:
sysargs: command used for generating files
"""
dt = ((datetime.datetime.today()).isoformat()).split('T')[0]
out_str = (
"###################################################################\n"
"# BASIN DELINEATION TOOL V{0}\n"
"###################################################################\n"
"\n The files in this folder were generated on {1}.\n"
"This was accomplished using the following command:\n"
"\n$ {2}\n"
"\nTo get access to the source code please visit:\n"
"https://github.com/USDA-ARS-NWRC/basin_setup")
out_str = out_str.format(__version__, dt, ' '.join(sys.argv))
with open(os.path.join(output_dir, 'README.txt'), 'w') as fp:
fp.write(out_str)
fp.close()
def cleanup(output_dir, at_start=False):
"""
Removes the temp folder and removes the following files:
* output/watersheds.shp
* output/*_subbasin.shp
* output/basin_outline.shp
* output/corrected_points.shp
Args:
        output_dir: folder to look in for cleanup
        at_start: If True, clean up more files than at the end of a run.
"""
out.msg("Cleaning up files...")
# Always cleanup the temp folder
temp = os.path.join(output_dir, 'temp')
if os.path.isdir(temp):
shutil.rmtree(temp)
if at_start:
# Remove any potential streamflow folders
streamflow = os.path.join(output_dir, 'streamflow')
if os.path.isdir(streamflow):
shutil.rmtree(streamflow)
fnames = os.listdir(output_dir)
for f in fnames:
fn = os.path.join(output_dir, f)
if ("_subbasin." in f or "thresh" in f or "basin_outline." in f
or 'watersheds_' in f or 'out.' in f
or "corrected_points_" in f):
out.dbg("Removing {}".format(f))
os.remove(fn)
def confirm_norerun(non_thresholdkeys, imgs):
"""
Checks if the non-thresholded files exist, if so confirm the user wants
to overwrite them.
Args:
        non_thresholdkeys: keys to check in the imgs dictionary of paths
imgs: Dictionary of paths to images
Returns
bool: Indicating whether we continue or not
"""
out.dbg("Checking if important delineation images pre-exist...")
    # Quickly check if the user wants to overwrite a possible rerun
move_forward = False
any_file_exists = False
for f in non_thresholdkeys:
if os.path.isfile(imgs[f]):
out.dbg("{} image exists!".format(f))
any_file_exists = True
out.warn("You are about to overwrite the delineation files that"
" take the longest to make. \n\nAre you sure you want to"
" do this? (y/n)\n")
answer = input()
acceptable_answer = False
while not acceptable_answer:
if answer.lower() == 'y':
acceptable_answer = True
move_forward = True
elif answer.lower() == 'n':
acceptable_answer = True
else:
acceptable_answer = False
break
# If there weren't any files then move ahead
if not any_file_exists:
move_forward = True
out.dbg("No pre-existing files, moving forward...")
return move_forward
def create_ars_streamflow_files(treefile, coordfile, threshold, wshp, netdir,
output='basin_catchments.csv'):
"""
Takes in the Tree file and the Coordinates file to produce a csv of the
downstream catchment, the elevation of a catchment, and contributing area
"""
today = (datetime.datetime.today().date()).isoformat()
header = ("#############################################################\n"
" Basin Catchment File for USDA-ARS-NWRC Streamflow modeling. \n"
" Delineatation Threshold: {}\n"
" Date Created: {}\n"
" Created using basin_setup v{}\n"
"#############################################################\n"
"\n".format(threshold, today,
__version__)
)
with open(output, 'w+') as fp:
fp.write(header)
fp.close()
# tree_names = ['link', 'start number', 'end number', 'downstream',
# 'upstream',
# 'strahler',
# 'monitor point',
# 'network magnitude']
# coord_names = ['dummy', 'x', 'y', 'distance', 'elevation', 'area']
# dftree = pd.read_csv(treefile, delimiter='\t', names=tree_names)
# dfcoord = pd.read_csv(coordfile, delimiter='\t', names=coord_names)
dfwshp = gpd.read_file(wshp)
    # Get the network shapefile which lives under a folder named after the
# tif.
name = os.path.split(netdir)[-1].split('.')[0] + '.shp'
netshp = os.path.join(netdir, name)
dfnet = gpd.read_file(netshp)
dfnet = dfnet.set_index('WSNO')
# Collect the area of each basin
dfwshp['area'] = dfwshp.area
# handle individual cells acting as subbasins
dfwshp = dfwshp.groupby('DN').sum()
# Collect down stream info.
dfwshp['downstream'] = dfnet['DSLINKNO']
dfwshp.to_csv(output, mode='a')
def output_streamflow(imgs, threshold, wshp, temp="temp",
output_dir='streamflow'):
"""
Outputs files necessary for streamflow modeling. This will create a file
structure under a folder defined by output_dir and the threshold.
E.g. streamflow/thresh_10000000
Args:
        imgs: Dictionary containing the files to be output.
threshold: threshold used for creating subbasins
wshp: Watershed shapefile
output_dir: Location to output files
"""
# Dictionary to grab filenames for ARS streamflow
dat = {}
out.msg("Creating streamflow files...")
final_output = os.path.join(output_dir, "thresh_{}".format(threshold))
if not os.path.isdir(output_dir):
out.msg("Making streamflow directory")
os.mkdir(output_dir)
if not os.path.isdir(final_output):
out.msg("Making streamflow threshold directory...")
os.mkdir(final_output)
# Convert the watersheds to ascii and move files to streamflow folder for
# SLF streamflow
for k in ['corrected_points', 'watersheds', 'coord', 'tree']:
name = os.path.basename(imgs[k])
outfile = os.path.join(final_output, k + "." + name.split('.')[-1])
        # Handle grabbing data for outputting ARS streamflow
if k in ['tree', 'coord']:
dat[k] = outfile
if k == 'watersheds':
outfile = os.path.join(final_output, k + '.asc')
convert2ascii(imgs[k], outfile)
else:
shutil.copy(imgs[k], outfile)
# Copy over threshold files
for f in os.listdir(imgs['net']):
to_f = os.path.join(final_output, os.path.basename(f))
shutil.copy(os.path.join(imgs["net"], f), to_f)
# Create the files for ARS Streamflow
create_ars_streamflow_files(dat['tree'],
dat['coord'],
threshold,
wshp,
imgs['net'],
output=os.path.join(final_output,
'basin_catchments.csv'))
def ernestafy(demfile, pour_points, output=None, temp=None, threshold=100,
rerun=False,
nthreads=None,
out_streams=False):
"""
Run TauDEM using the script Ernesto Made.... therefore we will
ernestafy this basin.
Args:
demfile: Original DEM tif.
pour_points: Locations of the pour_points in a .bna file format
output: Output folder location, default is ./delineation
threshold: Threshold to use, can be a list or a single value
rerun: boolean indicating whether to avoid re-doing steps 1-3
out_streams: Boolean determining whether to output the files for
streamflow modeling
"""
create_readme(sys.argv, output)
# Output File keys without a threshold in the filename
non_thresholdkeys = ['filled', 'flow_dir', 'slope', 'drain_area',
'basin_drain_area']
# Output File keys WITH a threshold in the filename
thresholdkeys = ['thresh_streams', 'thresh_basin_streams', 'order', 'tree',
'coord', 'net', 'watersheds', 'basin_outline',
'corrected_points']
filekeys = non_thresholdkeys + thresholdkeys
# Create file paths for the output file management
imgs = {}
for k in filekeys:
base = os.path.join(output, k)
# Add the threshold to the filename if need be
if k in thresholdkeys:
base = os.path.join(temp, k)
base += '_thresh_{}'.format(threshold)
        # Watch out for shapefiles
if 'points' in k:
imgs[k] = base + '.shp'
# Files we need for streamflow
elif k in ['coord', 'tree']:
imgs[k] = base + '.dat'
else:
imgs[k] = base + '.tif'
# This file if it already exists causes problems
if os.path.isfile(imgs['net']):
out.msg("Removing pre-existing stream network file...")
os.remove(imgs['net'])
# If we rerun we don't want to run steps 1-3 again
if rerun:
out.warn("Performing a rerun, assuming files for flow direction and"
" accumulation exist...")
else:
move_forward = confirm_norerun(non_thresholdkeys, imgs)
if move_forward:
# 1. Pit Remove in order to fill the pits in the DEM
pitremove(demfile, outfile=imgs['filled'], nthreads=nthreads)
# 2. D8 Flow Directions in order to compute the flow direction in
# each DEM cell
calcD8Flow(imgs['filled'], d8dir_file=imgs['flow_dir'],
d8slope_file=imgs['slope'],
nthreads=nthreads)
# 3. D8 Contributing Area so as to compute the drainage area in
# each DEM cell
calcD8DrainageArea(imgs['flow_dir'], areaD8_out=imgs['drain_area'],
nthreads=nthreads)
else:
out.msg("Please use the '--rerun' flag to perform a rerun.\n")
sys.exit()
##########################################################################
# This section and below gets run every call. (STEPS 4-8)
##########################################################################
# 4. Stream Definition by Threshold, in order to extract a first version of
# the stream network
defineStreamsByThreshold(imgs['drain_area'],
threshold_streams_out=imgs['thresh_streams'],
threshold=threshold, nthreads=nthreads)
# 5. Move Outlets to Streams, so as to move the catchment outlet point on
# one of the DEM cells identified by TauDEM as belonging to the stream
# network
outlets_2_streams(imgs['flow_dir'], imgs['thresh_streams'], pour_points,
new_pour_points=imgs['corrected_points'],
nthreads=nthreads)
# 6. D8 Contributing Area again, but with the catchment outlet point as
# additional input data
calcD8DrainageAreaBasin(imgs['flow_dir'], imgs['corrected_points'],
areaD8_out=imgs['basin_drain_area'],
nthreads=nthreads)
# 7. Stream Definition by Threshold again, but with the catchment outlet
# point as additional input data
defineStreamsByThreshold(imgs['basin_drain_area'],
threshold_streams_out=imgs['thresh_basin_streams'], # noqa
threshold=threshold,
nthreads=nthreads)
# 8. Stream Reach And Watershed
delineate_streams(demfile, imgs['flow_dir'], imgs['basin_drain_area'],
imgs['thresh_basin_streams'], imgs['corrected_points'],
stream_orderfile=imgs['order'], treefile=imgs['tree'],
coordfile=imgs['coord'], netfile=imgs['net'],
wfile=imgs['watersheds'], nthreads=nthreads)
# Output the shapefiles of the watershed
wshp = produce_shapefiles(imgs['watersheds'], imgs['corrected_points'],
output_dir=output)
if out_streams:
output_streamflow(imgs, threshold, wshp, temp=temp,
output_dir=os.path.join(output, 'streamflow'))
def main():
p = argparse.ArgumentParser(description='Delineates a new basin for'
' SMRF/AWSM/Streamflow')
p.add_argument("-d", "--dem", dest="dem",
required=True,
help="Path to dem")
p.add_argument("-p", "--pour", dest="pour_points",
required=True,
help="Path to a .bna of pour points")
p.add_argument("-o", "--output", dest="output",
required=False,
help="Path to output folder")
p.add_argument("-t", "--threshold", dest="threshold",
nargs="+", default=[100],
help="List of thresholds to use for defining streams from"
" the flow accumulation, default=100")
p.add_argument("-n", "--nthreads", dest="nthreads",
required=False,
help="Cores to use when processing the data")
p.add_argument("-re", "--rerun", dest="rerun",
required=False, action='store_true',
help="Boolean Flag that determines whether to run the "
"script from the beginning or assume that the flow "
"accumulation has been completed once")
p.add_argument("-db", "--debug", dest="debug",
required=False, action='store_true')
p.add_argument('-strm', '--streamflow', dest='streamflow', required=False,
action='store_true', help='Use to'
' output the necessary files for'
' streamflow modeling')
args = p.parse_args()
# Global debug variable
global DEBUG
DEBUG = args.debug
start = time.time()
# Print a nice header
msg = "Basin Delineation Tool v{0}".format(__version__)
m = "=" * (2 * len(msg) + 1)
out.msg(m, 'header')
out.msg(msg, 'header')
out.msg(m, 'header')
rerun = args.rerun
# Make sure our output folder exists
if args.output is None:
output = './delineation'
else:
output = args.output
temp = os.path.join(output, 'temp')
if not os.path.isdir(output):
os.mkdir(output)
else:
cleanup(output, at_start=True)
if not os.path.isdir(temp):
os.mkdir(temp)
# Cycle through all the thresholds provided
for i, tr in enumerate(args.threshold):
if i > 0:
rerun = True
ernestafy(args.dem, args.pour_points, output=output, temp=temp,
threshold=tr,
rerun=rerun,
nthreads=args.nthreads,
out_streams=args.streamflow)
if not args.debug:
cleanup(output, at_start=False)
stop = time.time()
out.msg("Basin Delineation Complete. Elapsed Time {0}s".format(
int(stop - start)))
if __name__ == '__main__':
main()
| 3,669
| 1,542
| 46
|
8907c9f34efb220b34e310d040d510f1d54753e0
| 1,565
|
py
|
Python
|
mlxtk/tools/entropy.py
|
f-koehler/mlxtk
|
373aed06ab23ab9b70cd99e160228c50b87e939a
|
[
"MIT"
] | 2
|
2018-12-21T19:41:10.000Z
|
2019-11-25T15:26:27.000Z
|
mlxtk/tools/entropy.py
|
f-koehler/mlxtk
|
373aed06ab23ab9b70cd99e160228c50b87e939a
|
[
"MIT"
] | 73
|
2017-12-22T13:30:16.000Z
|
2022-02-22T04:21:14.000Z
|
mlxtk/tools/entropy.py
|
f-koehler/mlxtk
|
373aed06ab23ab9b70cd99e160228c50b87e939a
|
[
"MIT"
] | null | null | null |
"""Compute entropy
"""
from typing import Union
import numpy
from mlxtk.log import get_logger
def compute_entropy(
natpop: numpy.ndarray, normalize: bool = False
) -> Union[numpy.ndarray, numpy.float64]:
"""Compute the Boltzmann entropy from natural populations.
The entropy is computed using the formula
:math:`S_\\mathrm{B}=-\\sum\\limits_{i}\\lambda_i\\ln(\\lambda_i)`.
Arguments:
natpop (numpy.ndarray): one- or two-dimensional array containing
natural populations
Returns:
Boltzmann entropy
"""
if len(natpop.shape) == 1:
result = 0.0
for lam in natpop:
if lam != 0.0:
result -= lam * numpy.log(lam)
if normalize:
m = natpop.shape[0]
if m == 1:
raise ZeroDivisionError("cannot normalize entropy for m=1")
else:
S_max = numpy.log(m)
result = result / S_max
return result
if len(natpop.shape) == 2:
result = numpy.zeros(natpop.shape[0])
for i in range(natpop.shape[0]):
for lam in natpop[i]:
if lam != 0.0:
result[i] -= lam * numpy.log(lam)
if normalize:
m = natpop.shape[1]
if m == 1:
raise ZeroDivisionError("cannot normalize entropy for m=1")
else:
S_max = numpy.log(m)
result = result / S_max
return result
raise ValueError("natpop must be either 1- or 2-dimensional")
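# Hypothetical usage sketch (not part of the original file): an evenly
# populated two-orbital state has Boltzmann entropy ln(2), so the normalized
# value is 1.0.
#
#     natpop = numpy.array([0.5, 0.5])
#     compute_entropy(natpop)                  # ~0.693 (= ln 2)
#     compute_entropy(natpop, normalize=True)  # 1.0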
| 26.525424
| 75
| 0.547604
|
"""Compute entropy
"""
from typing import Union
import numpy
from mlxtk.log import get_logger
def compute_entropy(
natpop: numpy.ndarray, normalize: bool = False
) -> Union[numpy.ndarray, numpy.float64]:
"""Compute the Boltzmann entropy from natural populations.
The entropy is computed using the formula
:math:`S_\\mathrm{B}=-\\sum\\limits_{i}\\lambda_i\\ln(\\lambda_i)`.
Arguments:
natpop (numpy.ndarray): one- or two-dimensional array containing
natural populations
Returns:
Boltzmann entropy
"""
if len(natpop.shape) == 1:
result = 0.0
for lam in natpop:
if lam != 0.0:
result -= lam * numpy.log(lam)
if normalize:
m = natpop.shape[0]
if m == 1:
raise ZeroDivisionError("cannot normalize entropy for m=1")
else:
S_max = numpy.log(m)
result = result / S_max
return result
if len(natpop.shape) == 2:
result = numpy.zeros(natpop.shape[0])
for i in range(natpop.shape[0]):
for lam in natpop[i]:
if lam != 0.0:
result[i] -= lam * numpy.log(lam)
if normalize:
m = natpop.shape[1]
if m == 1:
raise ZeroDivisionError("cannot normalize entropy for m=1")
else:
S_max = numpy.log(m)
result = result / S_max
return result
raise ValueError("natpop must be either 1- or 2-dimensional")
| 0
| 0
| 0
|
96be032b76e4f0245c5919da7cc7fc9aa378da31
| 2,678
|
py
|
Python
|
lsassy/impacketfile.py
|
pretech86/lsassy
|
8935fb6f0829055820e1c3d79016f522d04e3b6e
|
[
"MIT"
] | null | null | null |
lsassy/impacketfile.py
|
pretech86/lsassy
|
8935fb6f0829055820e1c3d79016f522d04e3b6e
|
[
"MIT"
] | null | null | null |
lsassy/impacketfile.py
|
pretech86/lsassy
|
8935fb6f0829055820e1c3d79016f522d04e3b6e
|
[
"MIT"
] | 1
|
2020-05-14T23:20:30.000Z
|
2020-05-14T23:20:30.000Z
|
# Author:
# Romain Bentz (pixis - @hackanddo)
# Website:
# https://beta.hackndo.com
| 34.333333
| 155
| 0.584765
|
# Author:
# Romain Bentz (pixis - @hackanddo)
# Website:
# https://beta.hackndo.com
class ImpacketFile:
def __init__(self):
self._conn = None
self._fpath = None
self._currentOffset = 0
self._total_read = 0
self._tid = None
self._fid = None
self._buffer_min_size = 1024 * 8
self._buffer_data = {
"offset": 0,
"size": 0,
"buffer": ""
}
def open(self, connection, share_name, fpath):
self._conn = connection
self._fpath = fpath
self._tid = self._conn.connectTree(share_name)
self._fid = self._conn.openFile(self._tid, self._fpath)
self._fileInfo = self._conn.queryInfo(self._tid, self._fid)
self._endOfFile = self._fileInfo.fields["EndOfFile"]
def __exit__(self, exc_type, exc_val, exc_tb):
self._conn.close()
def read(self, size):
if size == 0:
return b''
if (self._buffer_data["offset"] <= self._currentOffset <= self._buffer_data["offset"] + self._buffer_data["size"]
and self._buffer_data["offset"] + self._buffer_data["size"] > self._currentOffset + size):
value = self._buffer_data["buffer"][self._currentOffset - self._buffer_data["offset"]:self._currentOffset - self._buffer_data["offset"] + size]
else:
self._buffer_data["offset"] = self._currentOffset
"""
If data size is too small, read self._buffer_min_size bytes and cache them
"""
if size < self._buffer_min_size:
value = self._conn.readFile(self._tid, self._fid, self._currentOffset, self._buffer_min_size)
self._buffer_data["size"] = self._buffer_min_size
self._total_read += self._buffer_min_size
else:
value = self._conn.readFile(self._tid, self._fid, self._currentOffset, size + self._buffer_min_size)
self._buffer_data["size"] = size + self._buffer_min_size
self._total_read += size
self._buffer_data["buffer"] = value
self._currentOffset += size
return value[:size]
def close(self):
self._conn.close()
def seek(self, offset, whence=0):
if whence == 0:
self._currentOffset = offset
elif whence == 1:
self._currentOffset += offset
elif whence == 2:
self._currentOffset = self._endOfFile - offset
else:
raise Exception('Seek function whence value must be between 0-2')
def tell(self):
return self._currentOffset
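# Hypothetical usage sketch (not part of the original file): ImpacketFile wraps
# an impacket SMBConnection so a remote file can be read like a seekable
# stream. Host, credentials, share and path below are placeholders.
def _example_read_remote_file():
    from impacket.smbconnection import SMBConnection  # assumed dependency
    conn = SMBConnection("TARGET", "192.0.2.10")
    conn.login("user", "password")
    f = ImpacketFile()
    f.open(conn, "C$", "/Windows/Temp/example.dmp")
    header = f.read(8)   # small reads are served from the internal buffer
    f.seek(0)            # rewind via the tracked offset
    f.close()
    return header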
| 2,382
| -2
| 211
|
5a9c2e159124fd70adde5ed274b2d6ed828a3ae4
| 116
|
py
|
Python
|
freiner/errors.py
|
djmattyg007/freiner
|
4acff72c55c37495862ea642a70b443da1278894
|
[
"MIT"
] | null | null | null |
freiner/errors.py
|
djmattyg007/freiner
|
4acff72c55c37495862ea642a70b443da1278894
|
[
"MIT"
] | null | null | null |
freiner/errors.py
|
djmattyg007/freiner
|
4acff72c55c37495862ea642a70b443da1278894
|
[
"MIT"
] | null | null | null |
class FreinerConfigurationError(Exception):
"""
Raised when a configuration problem is encountered.
"""
| 23.2
| 55
| 0.715517
|
class FreinerConfigurationError(Exception):
"""
Raised when a configuration problem is encountered.
"""
| 0
| 0
| 0
|
0587740f1953f3e18ed695131ebf5a181e2171cc
| 3,684
|
py
|
Python
|
validation_tests/reports/validations_produce_results.py
|
samcom12/anuga_core
|
f4378114dbf02d666fe6423de45798add5c42806
|
[
"Python-2.0",
"OLDAP-2.7"
] | 136
|
2015-05-07T05:47:43.000Z
|
2022-02-16T03:07:40.000Z
|
validation_tests/reports/validations_produce_results.py
|
samcom12/anuga_core
|
f4378114dbf02d666fe6423de45798add5c42806
|
[
"Python-2.0",
"OLDAP-2.7"
] | 184
|
2015-05-03T09:27:54.000Z
|
2021-12-20T04:22:48.000Z
|
validation_tests/reports/validations_produce_results.py
|
samcom12/anuga_core
|
f4378114dbf02d666fe6423de45798add5c42806
|
[
"Python-2.0",
"OLDAP-2.7"
] | 70
|
2015-03-18T07:35:22.000Z
|
2021-11-01T07:07:29.000Z
|
"""
Script to run all the produce_results scripts in the
validation_tests/xxx/xxx/ directories
"""
import os
import time
import anuga
from anuga import indent
#from anuga.validation_utilities.parameters import alg
#from anuga.validation_utilities.parameters import cfl
args = anuga.get_args()
alg = args.alg
np = args.np
verbose = args.verbose
#---------------------------------
# Get the current svn revision
#---------------------------------
timestamp = time.asctime()
major_revision = anuga.get_version()
try:
# This fails if using git for version control
minor_revision = anuga.get_revision_number()
except:
try:
# This works when using git on unix
minor_revision = os.popen("git show-ref --head -s | head -n1").read().strip()
except:
# This is a fallback position
minor_revision = 'unknown'
#----------------------------------
# Now it is ok to create the latex
# macro file with run parameters
#
# FIXME: This is a little dangerous as
# this is changed before all the tests
# are run.
#----------------------------------
f = open('saved_parameters.tex', 'w')
#f.write('\\newcommand{\\cfl}{\\UScore{%s}}\n' % str(cfl))
f.write('\\newcommand{\\alg}{\\UScore{%s}}\n' % str(alg))
f.write('\\newcommand{\\majorR}{\\UScore{%s}}\n' % str(major_revision))
f.write('\\newcommand{\\minorR}{\\UScore{%s}}\n' % str(minor_revision))
f.write('\\newcommand{\\timeR}{{%s}}\n' % str(timestamp))
f.close()
#---------------------------------
# Run the tests
#---------------------------------
os.chdir('..')
buildroot = os.getcwd()
Upper_dirs = os.listdir('.')
dir = '.'
Upper_dirs = [name for name in os.listdir(dir) if os.path.isdir(os.path.join(dir, name))]
try:
Upper_dirs.remove('.svn')
except ValueError:
pass
try:
Upper_dirs.remove('reports')
except ValueError:
pass
try:
Upper_dirs.remove('case_studies')
except ValueError:
pass
#print Upper_dirs
#os.chdir('./Tests')
#print 'Tests'
print(Upper_dirs)
time_total = 0.0
test_number = 1
for dir in Upper_dirs:
os.chdir(dir)
print(72 * '=')
print('Directory: ' + dir)
print(72 * '=')
#print 'Changing to', os.getcwd()
dir = '.'
Lower_dirs = [name for name in os.listdir(dir) if os.path.isdir(os.path.join(dir, name))]
try:
Lower_dirs.remove('.svn')
except ValueError:
pass
#print Lower_dirs
for l_dir in Lower_dirs:
os.chdir(l_dir)
#print os.getcwd()
print(60 * '=')
print('Subdirectory %g: '% (test_number) + l_dir)
test_number += 1
print(60 * '=')
try:
t0 = time.time()
if verbose:
cmd = 'python produce_results.py -alg %s -np %s -v '% (str(alg),str(np))
else:
cmd = 'python produce_results.py -alg %s -np %s '% (str(alg),str(np))
print(2 * indent + 'Running: ' + cmd)
os.system(cmd)
t1 = time.time() - t0
time_total += t1
print(2 * indent + 'That took ' + str(t1) + ' secs')
except:
print(2 * indent + 'Failed running produce_results in ' + os.getcwd())
pass
os.chdir('..')
#print 'Changing to', os.getcwd()
os.chdir('..')
#print 'Changing to', os.getcwd()
os.chdir(buildroot)
print(72 * '=')
print('That took ' + str(time_total) + ' secs')
print(72 * '=')
# go back to reports directory to typeset report
os.chdir('reports')
os.system('python validations_typeset_report.py')
import subprocess
cmd = 'mv validations_report.pdf validations_report_alg_%s.pdf' % (str(alg))
print(cmd)
subprocess.call([cmd], shell=True)
| 23.316456
| 94
| 0.58089
|
"""
Script to run all the produce_results scripts in the
validation_tests/xxx/xxx/ directories
"""
import os
import time
import anuga
from anuga import indent
#from anuga.validation_utilities.parameters import alg
#from anuga.validation_utilities.parameters import cfl
args = anuga.get_args()
alg = args.alg
np = args.np
verbose = args.verbose
#---------------------------------
# Get the current svn revision
#---------------------------------
timestamp = time.asctime()
major_revision = anuga.get_version()
try:
# This fails if using git for version control
minor_revision = anuga.get_revision_number()
except:
try:
# This works when using git on unix
minor_revision = os.popen("git show-ref --head -s | head -n1").read().strip()
except:
# This is a fallback position
minor_revision = 'unknown'
#----------------------------------
# Now it is ok to create the latex
# macro file with run parameters
#
# FIXME: This is a little dangerous as
# this is changed before all the tests
# are run.
#----------------------------------
f = open('saved_parameters.tex', 'w')
#f.write('\\newcommand{\\cfl}{\\UScore{%s}}\n' % str(cfl))
f.write('\\newcommand{\\alg}{\\UScore{%s}}\n' % str(alg))
f.write('\\newcommand{\\majorR}{\\UScore{%s}}\n' % str(major_revision))
f.write('\\newcommand{\\minorR}{\\UScore{%s}}\n' % str(minor_revision))
f.write('\\newcommand{\\timeR}{{%s}}\n' % str(timestamp))
f.close()
#---------------------------------
# Run the tests
#---------------------------------
os.chdir('..')
buildroot = os.getcwd()
Upper_dirs = os.listdir('.')
dir = '.'
Upper_dirs = [name for name in os.listdir(dir) if os.path.isdir(os.path.join(dir, name))]
try:
Upper_dirs.remove('.svn')
except ValueError:
pass
try:
Upper_dirs.remove('reports')
except ValueError:
pass
try:
Upper_dirs.remove('case_studies')
except ValueError:
pass
#print Upper_dirs
#os.chdir('./Tests')
#print 'Tests'
print(Upper_dirs)
time_total = 0.0
test_number = 1
for dir in Upper_dirs:
os.chdir(dir)
print(72 * '=')
print('Directory: ' + dir)
print(72 * '=')
#print 'Changing to', os.getcwd()
dir = '.'
Lower_dirs = [name for name in os.listdir(dir) if os.path.isdir(os.path.join(dir, name))]
try:
Lower_dirs.remove('.svn')
except ValueError:
pass
#print Lower_dirs
for l_dir in Lower_dirs:
os.chdir(l_dir)
#print os.getcwd()
print(60 * '=')
print('Subdirectory %g: '% (test_number) + l_dir)
test_number += 1
print(60 * '=')
try:
t0 = time.time()
if verbose:
cmd = 'python produce_results.py -alg %s -np %s -v '% (str(alg),str(np))
else:
cmd = 'python produce_results.py -alg %s -np %s '% (str(alg),str(np))
print(2 * indent + 'Running: ' + cmd)
os.system(cmd)
t1 = time.time() - t0
time_total += t1
print(2 * indent + 'That took ' + str(t1) + ' secs')
except:
print(2 * indent + 'Failed running produce_results in ' + os.getcwd())
pass
os.chdir('..')
#print 'Changing to', os.getcwd()
os.chdir('..')
#print 'Changing to', os.getcwd()
os.chdir(buildroot)
print(72 * '=')
print('That took ' + str(time_total) + ' secs')
print(72 * '=')
# go back to reports directory to typeset report
os.chdir('reports')
os.system('python validations_typeset_report.py')
import subprocess
cmd = 'mv validations_report.pdf validations_report_alg_%s.pdf' % (str(alg))
print(cmd)
subprocess.call([cmd], shell=True)
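# Minimal sketch (not part of the original runner): invoking one produce_results.py
# run with subprocess.run instead of os.system so the return code and timing are
# captured explicitly. The -alg/-np/-v flags mirror the cmd string built in the
# loop above; the directory argument is purely illustrative.
import subprocess, time
def run_one(test_dir, alg, np_procs, verbose=False):
    cmd = ['python', 'produce_results.py', '-alg', str(alg), '-np', str(np_procs)]
    if verbose:
        cmd.append('-v')
    t0 = time.time()
    result = subprocess.run(cmd, cwd=test_dir)  # returncode != 0 signals a failed test
    return result.returncode, time.time() - t0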
| 0
| 0
| 0
|
87f73a334314740813918568d6ad3d1326a6215b
| 103
|
py
|
Python
|
playserver/__init__.py
|
ollien/playserver
|
9043c4ae92e382a4c5f83cfd77db21b0dff39404
|
[
"MIT"
] | null | null | null |
playserver/__init__.py
|
ollien/playserver
|
9043c4ae92e382a4c5f83cfd77db21b0dff39404
|
[
"MIT"
] | null | null | null |
playserver/__init__.py
|
ollien/playserver
|
9043c4ae92e382a4c5f83cfd77db21b0dff39404
|
[
"MIT"
] | null | null | null |
from . import track
from . import webserver
from . import tracksocketserver
from . import globalconfig
| 20.6
| 31
| 0.805825
|
from . import track
from . import webserver
from . import tracksocketserver
from . import globalconfig
| 0
| 0
| 0
|
aee842451f6eade6db5c2978ec2bdca97f54e1b7
| 341
|
py
|
Python
|
Courses/Codeval/Easy/Python/48-MultiplyList.py
|
leparrav/Playground
|
dcb90a2dd2bc1867511cfe621eb21248a60e357f
|
[
"Unlicense"
] | 1
|
2019-02-13T12:02:26.000Z
|
2019-02-13T12:02:26.000Z
|
Courses/Codeval/Easy/Python/48-MultiplyList.py
|
leparrav/Playground
|
dcb90a2dd2bc1867511cfe621eb21248a60e357f
|
[
"Unlicense"
] | 1
|
2018-08-13T15:58:33.000Z
|
2018-08-13T15:58:33.000Z
|
Courses/Codeval/Easy/Python/48-MultiplyList.py
|
leparrav/Playground
|
dcb90a2dd2bc1867511cfe621eb21248a60e357f
|
[
"Unlicense"
] | 2
|
2017-08-10T20:01:29.000Z
|
2021-07-01T08:39:13.000Z
|
'''
https://www.codeeval.com/open_challenges/113/
'''
import sys
if __name__ == '__main__':
main()
| 20.058824
| 61
| 0.604106
|
'''
https://www.codeeval.com/open_challenges/113/
'''
import sys
def main():
with open(sys.argv[1],'r') as word_file:
for line in word_file.readlines():
line = line.strip().split("|")
v1 = line[0].split()
v2 = line[1].split()
print " ".join([str(int(a)*int(b)) for a,b in zip(v1,v2)])
if __name__ == '__main__':
main()
| 213
| 0
| 23
|
2dcd7277bafb8be6b517570230e90076a4f3dbdc
| 3,014
|
py
|
Python
|
src/vm.py
|
obs145628/py-calculatrice
|
d833307b79c01b046e82204f65334be28b1af2a0
|
[
"MIT"
] | null | null | null |
src/vm.py
|
obs145628/py-calculatrice
|
d833307b79c01b046e82204f65334be28b1af2a0
|
[
"MIT"
] | null | null | null |
src/vm.py
|
obs145628/py-calculatrice
|
d833307b79c01b046e82204f65334be28b1af2a0
|
[
"MIT"
] | null | null | null |
_ntypes = 0
TYPE_VOID = add_type()
TYPE_INT = add_type()
TYPE_FLOAT = add_type()
TYPE_STRING = add_type()
TYPE_LVALUE = add_type()
TYPE_FUNCTION = add_type()
_funs = dict()
| 21.683453
| 85
| 0.618447
|
_ntypes = 0
def add_type():
global _ntypes
res = _ntypes
_ntypes += 1
return res
TYPE_VOID = add_type()
TYPE_INT = add_type()
TYPE_FLOAT = add_type()
TYPE_STRING = add_type()
TYPE_LVALUE = add_type()
TYPE_FUNCTION = add_type()
class Value:
def __init__(self, type):
self.type = type
class ValueVoid(Value):
def __init__(self):
Value.__init__(self, TYPE_VOID)
class ValueInt(Value):
def __init__(self, val):
Value.__init__(self, TYPE_INT)
self.val = int(val)
class ValueFloat(Value):
def __init__(self, val):
Value.__init__(self, TYPE_FLOAT)
self.val = float(val)
class ValueString(Value):
def __init__(self, val):
Value.__init__(self, TYPE_STRING)
self.val = str(val)
class ValueLval(Value):
def __init__(self, val):
Value.__init__(self, TYPE_LVALUE)
self.val = str(val)
class VMFunction:
def __init__(self, native_fun, first_type = None):
self.native_fun = native_fun
self.first_type = first_type
_funs = dict()
def declare_fun(name, fun):
global _funs
first_type = fun.first_type
if name not in _funs:
_funs[name] = [None, dict()]  # list, not tuple: the slots are reassigned below
fun_obj = _funs[name]
if first_type is None:
if len(fun_obj[1]) != 0 or fun_obj[0] is not None:
raise Exception("Can't declare native normal functions: already exists")
fun_obj[0] = fun
else:
if fun_obj[0] is not None:
raise Exception("Can't declare native special functions: already exists")
fun_obj[1][first_type] = fun
def get_fun(name, args):
global _funs
if name not in _funs:
return None
fun_obj = _funs[name]
if fun_obj[0] is None:
first_type = args[0].type
return fun_obj[1][first_type] if first_type in fun_obj[1] else None
else:
return fun_obj[0]
class VM:
def __init__(self):
self.vars = dict()
self.funs = dict()
def load_var(self, name):
return self.vars[name] if name in self.vars else None
def store_var(self, name, val):
self.vars[name] = val
def clear_var(self, name):
del self.vars[name]
def exec_fun(self, name, args):
fun = get_fun(name, args)
if fun is None:
raise Exception("Runtime Error: call to " + name + " failed")
return fun.native_fun(args)
class FunTypeMatcher:
def __init__(self, arg_num, funs_dict, fun_default = None):
self.arg_num = arg_num
self.funs_dict = funs_dict
self.fun_default = fun_default
def __call__(self, args):
if self.arg_num >= len(args):
raise Exception('Invalid number of arguments')
type = args[self.arg_num].type
if type in self.funs_dict:
return self.funs_dict[type](args)
if self.fun_default != None:
return self.fun_default(args)
else:
raise Exception('Invalid types for arguments')
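# Usage sketch (not in the original file): register a native function for integer
# arguments and dispatch a call through the VM. The names follow the definitions
# above; the 'add' function itself is an illustrative example.
def _native_add(args):
    return ValueInt(args[0].val + args[1].val)
declare_fun('add', VMFunction(_native_add, first_type=TYPE_INT))
vm = VM()
result = vm.exec_fun('add', [ValueInt(2), ValueInt(3)])
print(result.val)  # 5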
| 2,186
| -13
| 653
|
2d27dc35e61ff2ac02f3bb9cdf6c137579530aea
| 91
|
py
|
Python
|
backend/battling/apps.py
|
eduardavercosa/theDudaProject
|
af8332211378e449447538c0a6d610eaec6e1e1a
|
[
"MIT"
] | null | null | null |
backend/battling/apps.py
|
eduardavercosa/theDudaProject
|
af8332211378e449447538c0a6d610eaec6e1e1a
|
[
"MIT"
] | 13
|
2021-03-31T15:19:02.000Z
|
2021-07-30T20:24:40.000Z
|
backend/battling/apps.py
|
eduardavercosa/theDudaProject
|
af8332211378e449447538c0a6d610eaec6e1e1a
|
[
"MIT"
] | 2
|
2021-03-25T20:13:58.000Z
|
2021-03-25T20:25:15.000Z
|
from django.apps import AppConfig
| 15.166667
| 33
| 0.758242
|
from django.apps import AppConfig
class BattlingConfig(AppConfig):
name = "battling"
| 0
| 33
| 23
|
14df3965a52ef794a701a973e8584d6f6bac4cc0
| 2,774
|
py
|
Python
|
Examples/ExampleScripts/PrevNextGlyph.py
|
andyclymer/ControlBoard
|
e9b56341c38b982fe22db4e40a86c6b219c85d7e
|
[
"MIT"
] | 21
|
2015-03-06T12:02:18.000Z
|
2021-03-28T16:44:11.000Z
|
Examples/ExampleScripts/PrevNextGlyph.py
|
andyclymer/ControlBoard
|
e9b56341c38b982fe22db4e40a86c6b219c85d7e
|
[
"MIT"
] | 1
|
2015-03-07T00:14:24.000Z
|
2015-03-07T00:14:24.000Z
|
Examples/ExampleScripts/PrevNextGlyph.py
|
andyclymer/ControlBoard
|
e9b56341c38b982fe22db4e40a86c6b219c85d7e
|
[
"MIT"
] | 3
|
2015-03-06T11:36:09.000Z
|
2015-06-30T04:46:41.000Z
|
from mojo.UI import CurrentGlyphWindow
from mojo.events import addObserver, removeObserver
import vanilla
class PrevNextGlyph:
"""
ControlBoard
"PrevNextGlyph" demo
Use a Rotary Encoder component to switch the Current Glyph Window to the previous or next glyphs.
After removing the code for the sample window, this script could be used as a Startup Script
"""
PrevNextGlyph()
| 40.202899
| 117
| 0.570656
|
from mojo.UI import CurrentGlyphWindow
from mojo.events import addObserver, removeObserver
import vanilla
class PrevNextGlyph:
"""
ControlBoard
"PrevNextGlyph" demo
Use a Rotary Encoder component to switch the Current Glyph Window to the previous or next glyphs.
After removing the code for the sample window, this script could be used as a Startup Script
"""
def __init__(self):
self.w = vanilla.Window((100, 100), "Previous/Next Glyph")
self.w.bind("close", self.windowClosed)
self.w.open()
# When the state of any component on your board changes (button pressed, knob turned), a "ControlBoardInput"
# notification will be made. Start observing for these notifications and give a method name in this script
# to be called when the notification comes in, in this case self.controlChanged
addObserver(self, "controlChanged", "ControlBoardInput")
self.controlName = "Rotary"
def controlChanged(self, info):
# Make sure the ControlBoardInput notification is for the desired control name:
if info["name"] == self.controlName:
# Figure out some info about the current glyph, and current glyph window
glyph = CurrentGlyph()
font = CurrentFont()
glyphOrder = []
if font:
glyphOrder = font.lib["public.glyphOrder"]
# If there's a glyph window open:
w = CurrentGlyphWindow()
if w:
# Find this glyph's index in the glyphOrder
thisGlyphIndex = glyphOrder.index(glyph.name)
prevGlyphIndex = thisGlyphIndex - 1
nextGlyphIndex = thisGlyphIndex + 1
if nextGlyphIndex == len(glyphOrder):
nextGlyphIndex = 0
elif prevGlyphIndex < 0:
prevGlyphIndex = len(glyphOrder) - 1
# Then, find the previous/next glyph names
prevGlyphName = glyphOrder[prevGlyphIndex]
nextGlyphName = glyphOrder[nextGlyphIndex]
# Now that we know something about the current glyph and its neighbors:
if info["value"] == "cw":
# Move clockwise, next glyph
w.setGlyphByName(nextGlyphName)
elif info["value"] == "ccw":
# Counter-clockwise, prev glyph
w.setGlyphByName(prevGlyphName)
def windowClosed(self, sender):
removeObserver(self, "ControlBoardInput")
PrevNextGlyph()
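# Minimal observer sketch (assumes the same ControlBoard notification shape used in
# the class above). It only prints what arrives, which is a quick way to discover
# the "name"/"value" fields reported by a new component.
from mojo.events import addObserver
class ControlBoardLogger:
    def __init__(self):
        addObserver(self, "controlChanged", "ControlBoardInput")
    def controlChanged(self, info):
        print(info["name"], info["value"])
ControlBoardLogger()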
| 2,166
| 0
| 113
|
2e4f43eef799a5742b5c04f6a152c2bae78060a5
| 1,871
|
py
|
Python
|
tests/test_simsrv.py
|
erikvanmulligen/etransafe-use-scenarios
|
6ec26120315abd66efdbfee6f5acba71e698a2b4
|
[
"MIT"
] | null | null | null |
tests/test_simsrv.py
|
erikvanmulligen/etransafe-use-scenarios
|
6ec26120315abd66efdbfee6f5acba71e698a2b4
|
[
"MIT"
] | 1
|
2021-02-11T14:59:37.000Z
|
2021-02-11T14:59:37.000Z
|
tests/test_simsrv.py
|
erikvanmulligen/etransafe-use-scenarios
|
6ec26120315abd66efdbfee6f5acba71e698a2b4
|
[
"MIT"
] | null | null | null |
import pprint
import argparse
from src.knowledgehub.api import KnowledgeHubAPI
if __name__ == "__main__":
main()
| 41.577778
| 103
| 0.603421
|
import pprint
import argparse
from src.knowledgehub.api import KnowledgeHubAPI
def main():
#api = KnowledgeHubAPI(server='TEST', client_secret='39c644b3-1f23-4d94-a71f-e0fb43ebd760')
api = KnowledgeHubAPI(server='DEV', client_secret='3db5a6d7-4694-48a4-8a2e-e9c30d78f9ab')
#api.set_service('DEV')
print('before login')
api.login('tester', 'tester')
print('after login')
status = api.SimilarityService().spaces()
print(f'status={status}')
omeprazole = 'CCC1=C(C)CN(C(=O)NCCC2=CC=C(C=C2)S(=O)(=O)NC(=O)NC2CCC(C)CC2)C1-Cl'
similar_compounds = api.SimilarityService().get(omeprazole, cutoff=0.3)
if similar_compounds != None:
names = []
smiles = []
similarities = []
if ('search_results' in similar_compounds) and (len(similar_compounds['search_results']) == 1):
search_result = similar_compounds['search_results'][0]
if 'obj_nam' in search_result:
for i in range(len(search_result['obj_nam'])):
names.append(search_result['obj_nam'][i])
smiles.append(search_result['SMILES'][i])
similarities.append("{:.4f}".format(search_result['distances'][i]))
for cmp in search_result['obj_nam']:
concept = api.ChemistryService().getCompoundByName(cmp)
print(concept)
# concept = api.SemanticService().normalize(cmp, ['RxNorm'])
# if 'concepts' in concept and len(concept['concepts']) == 1:
# compoundIds.append(concept['concepts'][0]['conceptCode'])
# compoundNames.append(concept['concepts'][0]['conceptName'])
else:
print('something wrong in the result object from the similarity service')
if __name__ == "__main__":
main()
| 1,727
| 0
| 23
|
c62df33d10fca822c2f9f72c3c0f16c300609e0c
| 107
|
py
|
Python
|
day2/day2-05 lab1-1 multiple if.py
|
hajin-kim/2020-HighSchool-Python-Tutoring
|
352025a954bff37d21cc3d59e7d5e0f0269a1f17
|
[
"MIT"
] | null | null | null |
day2/day2-05 lab1-1 multiple if.py
|
hajin-kim/2020-HighSchool-Python-Tutoring
|
352025a954bff37d21cc3d59e7d5e0f0269a1f17
|
[
"MIT"
] | null | null | null |
day2/day2-05 lab1-1 multiple if.py
|
hajin-kim/2020-HighSchool-Python-Tutoring
|
352025a954bff37d21cc3d59e7d5e0f0269a1f17
|
[
"MIT"
] | null | null | null |
num1 = int( input() )
num2 = int( input() )
if num1 > 0:
if num2 > 0:
print(num1, num2)
| 15.285714
| 26
| 0.476636
|
num1 = int( input() )
num2 = int( input() )
if num1 > 0:
if num2 > 0:
print(num1, num2)
| 0
| 0
| 0
|
f30af8b892bb64dbc6995802842d8a62372c28be
| 227
|
py
|
Python
|
PyQt5/DateTime/xmas.py
|
zhaokai0402/PyQt5-Study
|
e6280fdc615e47c947c0902836350db49441e6de
|
[
"MIT"
] | null | null | null |
PyQt5/DateTime/xmas.py
|
zhaokai0402/PyQt5-Study
|
e6280fdc615e47c947c0902836350db49441e6de
|
[
"MIT"
] | null | null | null |
PyQt5/DateTime/xmas.py
|
zhaokai0402/PyQt5-Study
|
e6280fdc615e47c947c0902836350db49441e6de
|
[
"MIT"
] | null | null | null |
from PyQt5.QtCore import QDate, Qt
p1 = QDate(1996, 4, 2)
p2 = QDate(1994, 6, 13)
dayspassed = p1.daysTo(p2)
print("{0} days have passed since {1} to {2}".format(dayspassed, p1.toString(Qt.ISODate), p2.toString(Qt.ISODate)))
| 28.375
| 115
| 0.696035
|
from PyQt5.QtCore import QDate, Qt
p1 = QDate(1996, 4, 2)
p2 = QDate(1994, 6, 13)
dayspassed = p1.daysTo(p2)
print("{0} days have passed since {1} to {2}".format(dayspassed, p1.toString(Qt.ISODate), p2.toString(Qt.ISODate)))
| 0
| 0
| 0
|
8a6c28798ac5f2eea948fe552d7ed58ea3366867
| 12,696
|
py
|
Python
|
qtorch/quant/quant_function.py
|
drcut/QPyTorch
|
63c293178e8ce9e6e5b218dee96536e9c4ad1e5c
|
[
"MIT"
] | null | null | null |
qtorch/quant/quant_function.py
|
drcut/QPyTorch
|
63c293178e8ce9e6e5b218dee96536e9c4ad1e5c
|
[
"MIT"
] | null | null | null |
qtorch/quant/quant_function.py
|
drcut/QPyTorch
|
63c293178e8ce9e6e5b218dee96536e9c4ad1e5c
|
[
"MIT"
] | null | null | null |
import torch
from qtorch import Number, FixedPoint, BlockFloatingPoint, FloatingPoint
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.utils.cpp_extension import load
import os
current_path = os.path.dirname(os.path.realpath(__file__))
quant_cpu = load(
name='quant_cpu',
sources=[
os.path.join(current_path, "quant_cpu/quant_cpu.cpp"),
os.path.join(current_path, "quant_cpu/bit_helper.cpp"),
os.path.join(current_path, "quant_cpu/sim_helper.cpp"),
]
)
if torch.cuda.is_available():
quant_cuda = load(
name='quant_cuda',
sources=[
os.path.join(current_path, "quant_cuda/quant_cuda.cpp"),
os.path.join(current_path, "quant_cuda/bit_helper.cu"),
os.path.join(current_path, "quant_cuda/sim_helper.cu"),
os.path.join(current_path, "quant_cuda/block_kernel.cu"),
os.path.join(current_path, "quant_cuda/float_kernel.cu"),
os.path.join(current_path, "quant_cuda/fixed_point_kernel.cu"),
os.path.join(current_path, "quant_cuda/quant.cu"),
]
)
else:
quant_cuda = quant_cpu
__all__ = ['fixed_point_quantize', 'block_quantize', 'float_quantize', "quantizer"]
def quantizer(forward_number=None, backward_number=None,
forward_rounding="stochastic", backward_rounding="stochastic",
clamping_grad_zero=False, backward_hooks=[]):
"""
Creates a quantization function to support quantizing forward and backward process differently.
Args:
- :param: forward_number (qtorch.Number, optional) : the number format used for forward quantization.
if it is None, the quantization is an identity mapping.
- :param: backward_number (qtorch.Number, optional) : the number format used for backward quantization.
if it is None, the quantization is an identity mapping.
- :param: forward_rounding (string) : rounding mode, \"stochastic\" or \"nearest\" (default: \"stochastic\")
- :param: backward_rounding (string) : rounding mode, \"stochastic\" or \"nearest\" (default: \"stochastic\")
- :param: clamping_grad_zero (bool) : zero out the gradient of numbers that are being clamped during forward propagation.
currently requires forward_number to be a fixed point number.
- :param: backward_hooks (iterable) : iterable of functions that will be applied to gradients before backward quantization.
For example, this can be used to support custom scaling.
Returns:
A quantization function as specified (torch.Tensor -> torch.Tensor)
"""
for rounding in [forward_rounding, backward_rounding]:
assert rounding in ["stochastic", "nearest"], "invalid rounding type {:s}".format(rounding)
for num in [forward_number, backward_number]:
if num != None: assert isinstance(num, Number)
if clamping_grad_zero==False:
if forward_rounding=="nearest":
if type(forward_number)==BlockFloatingPoint:
forward_quant = lambda x, quant_module: quant_module.block_quantize_nearest(x, forward_number.wl, forward_number.dim)
elif type(forward_number)==FixedPoint:
forward_quant = lambda x, quant_module: quant_module.fixed_point_quantize_nearest(x, forward_number.wl,
forward_number.fl, forward_number.clamp,
forward_number.symmetric)
elif type(forward_number)==FloatingPoint:
forward_quant = lambda x, quant_module: quant_module.float_quantize_nearest(x, forward_number.man, forward_number.exp)
elif forward_rounding=="stochastic":
if type(forward_number)==BlockFloatingPoint:
forward_quant = lambda x, quant_module: quant_module.block_quantize_stochastic(x, forward_number.wl, forward_number.dim)
elif type(forward_number)==FixedPoint:
forward_quant = lambda x, quant_module: quant_module.fixed_point_quantize_stochastic(x, forward_number.wl, forward_number.fl,
forward_number.clamp, forward_number.symmetric)
elif type(forward_number)==FloatingPoint:
forward_quant = lambda x, quant_module: quant_module.float_quantize_stochastic(x, forward_number.man, forward_number.exp)
else:
if type(forward_number)==FixedPoint or forward_number==None:
assert forward_number==None or forward_number.clamp == True, "must use clamping if zeroing out clamped gradient"
if forward_rounding=="nearest":
forward_quant = lambda x, quant_module: quant_module.fixed_point_quantize_nearest_mask(x, forward_number.wl, forward_number.fl, forward_number.symmetric)
elif forward_rounding=="stochastic":
forward_quant = lambda x, quant_module: quant_module.fixed_point_quantize_stochastic_mask(x, forward_number.wl, forward_number.fl, forward_number.symmetric)
else:
raise ValueError("zeroing clamping gradient only support fixed point.")
if backward_rounding=="nearest":
if type(backward_number)==BlockFloatingPoint:
backward_quant = lambda a, quant_module: quant_module.block_quantize_nearest(a, backward_number.wl, backward_number.dim)
elif type(backward_number)==FixedPoint:
backward_quant = lambda a, quant_module: quant_module.fixed_point_quantize_nearest(a, backward_number.wl, backward_number.fl,
backward_number.clamp, backward_number.symmetric)
elif type(backward_number)==FloatingPoint:
backward_quant = lambda a, quant_module: quant_module.float_quantize_nearest(a, backward_number.man, backward_number.exp)
elif backward_rounding=="stochastic":
if type(backward_number)==BlockFloatingPoint:
backward_quant = lambda a, quant_module: quant_module.block_quantize_stochastic(a, backward_number.wl, backward_number.dim)
elif type(backward_number)==FixedPoint:
backward_quant = lambda a, quant_module: quant_module.fixed_point_quantize_stochastic(a, backward_number.wl, backward_number.fl,
backward_number.clamp, backward_number.symmetric)
elif type(backward_number)==FloatingPoint:
backward_quant = lambda a, quant_module: quant_module.float_quantize_stochastic(a, backward_number.man, backward_number.exp)
if clamping_grad_zero == False:
else:
return Rounding.apply
def fixed_point_quantize(x, wl, fl, clamp=True, symmetric=False, rounding="stochastic"):
"""
Quantize a single precision Floating Point into low-precision Fixed Point
Args:
- :param: `x` (torch.Tensor) : the single precision number to be quantized
- :param: `wl` (int) : word length of the fixed point number being simulated
- :param: `fl` (int) : fractional length of the fixed point number being simulated
- :param: `clamp` (bool, optional) : clamp input numbers into representable range. if false,
the quantization will only simulate the effect on precision
- :param: `symmetric` (bool, optional) : discard the minimum representable number to make the representable
range symmetric
- :param: `rounding` (string) : rounding mode, \"stochastic\" or \"nearest\" (default: \"stochastic\")
Returns:
- a quantized low-precision block floating point number (torch.Tensor)
"""
assert isinstance(x, torch.Tensor)
assert rounding in ["stochastic", "nearest"]
assert_wl_fl(wl, fl)
quant_module = get_module(x)
if rounding == "nearest":
out = quant_module.fixed_point_quantize_nearest(x.contiguous(), wl, fl, clamp, symmetric)
elif rounding == "stochastic":
out = quant_module.fixed_point_quantize_stochastic(x.contiguous(), wl, fl, clamp, symmetric)
return out
def block_quantize(x, wl, dim=-1, rounding="stochastic"):
"""
Quantize a single precision Floating Point into low-precision Block Floating Point
Args:
- :param: `x` (torch.Tensor) : the single precision number to be quantized
- :param: `wl` (int) : word length of the block floating point number being simulated
- :param: `rounding` (string) : rounding mode, \"stochastic\" or \"nearest\"
Returns:
- a quantized low-precision block floating point number (torch.Tensor)
"""
assert isinstance(x, torch.Tensor), "x is not a single precision Floating Point Tensor"
assert rounding in ["stochastic", "nearest"], "invalid rounding mode, {}".format(rounding)
quant_module = get_module(x)
if rounding=="nearest":
out = quant_module.block_quantize_nearest(x.contiguous(), wl, dim)
elif rounding=="stochastic":
out = quant_module.block_quantize_stochastic(x.contiguous(), wl, dim)
return out
def float_quantize(x, exp, man, rounding="stochastic"):
"""
Quantize a single precision Floating Point into low-precision Floating Point
Args:
- :attr: `x` (torch.Tensor) : the single precision number(torch.Tensor) to be quantized
- :attr: `exp` (int) : number of bits allocated for exponent
- :attr: `man` (int) : number of bits allocated for mantissa, not counting the virtual bit
- :attr: `rounding` (string) : rounding mode, \"stochastic\" or \"nearest\"
Returns:
- a quantized low-precision floating point number (torch.Tensor)
"""
assert isinstance(x, torch.Tensor), "x is not a single precision Floating Point Tensor"
assert rounding in ["stochastic", "nearest"], "invalid rounding mode, {}".format(rounding)
quant_module = get_module(x)
if rounding=="nearest":
out = quant_module.float_quantize_nearest(x.contiguous(), man, exp)
elif rounding=="stochastic":
out = quant_module.float_quantize_stochastic(x.contiguous(), man, exp)
return out
| 52.032787
| 172
| 0.632246
|
import torch
from qtorch import Number, FixedPoint, BlockFloatingPoint, FloatingPoint
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.utils.cpp_extension import load
import os
current_path = os.path.dirname(os.path.realpath(__file__))
quant_cpu = load(
name='quant_cpu',
sources=[
os.path.join(current_path, "quant_cpu/quant_cpu.cpp"),
os.path.join(current_path, "quant_cpu/bit_helper.cpp"),
os.path.join(current_path, "quant_cpu/sim_helper.cpp"),
]
)
if torch.cuda.is_available():
quant_cuda = load(
name='quant_cuda',
sources=[
os.path.join(current_path, "quant_cuda/quant_cuda.cpp"),
os.path.join(current_path, "quant_cuda/bit_helper.cu"),
os.path.join(current_path, "quant_cuda/sim_helper.cu"),
os.path.join(current_path, "quant_cuda/block_kernel.cu"),
os.path.join(current_path, "quant_cuda/float_kernel.cu"),
os.path.join(current_path, "quant_cuda/fixed_point_kernel.cu"),
os.path.join(current_path, "quant_cuda/quant.cu"),
]
)
else:
quant_cuda = quant_cpu
__all__ = ['fixed_point_quantize', 'block_quantize', 'float_quantize', "quantizer"]
def assert_wl_fl(wl, fl, stage=""):
if wl == -1 and fl != -1:
raise ValueError("fixed point {} wl {}, fl {}".format(stage, wl, fl))
def get_module(x):
if x.is_cuda:
quant_module = quant_cuda
else:
quant_module = quant_cpu
return quant_module
def quantizer(forward_number=None, backward_number=None,
forward_rounding="stochastic", backward_rounding="stochastic",
clamping_grad_zero=False, backward_hooks=[]):
"""
Creates a quantization function to support quantizing forward and backward process differently.
Args:
- :param: forward_number (qtorch.Number, optional) : the number format used for forward quantization.
if it is None, the quantization is an identity mapping.
- :param: backward_number (qtorch.Number, optional) : the number format used for backward quantization.
if it is None, the quantization is an identity mapping.
- :param: forward_rounding (string) : rounding mode, \"stochastic\" or \"nearest\" (default: \"stochastic\")
- :param: backward_rounding (string) : rounding mode, \"stochastic\" or \"nearest\" (default: \"stochastic\")
- :param: clamping_grad_zero (bool) : zero out the gradient of numbers that are being clamped during forward propagation.
currently requires forward_number to be a fixed point number.
- :param: backward_hooks (iterable) : iterable of functions that will be applied to gradients before backward quantization.
For example, this can be used to support custom scaling.
Returns:
A quantization function as specified (torch.Tensor -> torch.Tensor)
"""
for rounding in [forward_rounding, backward_rounding]:
assert rounding in ["stochastic", "nearest"], "invalid rounding type {:s}".format(rounding)
for num in [forward_number, backward_number]:
if num != None: assert isinstance(num, Number)
if clamping_grad_zero==False:
if forward_rounding=="nearest":
if type(forward_number)==BlockFloatingPoint:
forward_quant = lambda x, quant_module: quant_module.block_quantize_nearest(x, forward_number.wl, forward_number.dim)
elif type(forward_number)==FixedPoint:
forward_quant = lambda x, quant_module: quant_module.fixed_point_quantize_nearest(x, forward_number.wl,
forward_number.fl, forward_number.clamp,
forward_number.symmetric)
elif type(forward_number)==FloatingPoint:
forward_quant = lambda x, quant_module: quant_module.float_quantize_nearest(x, forward_number.man, forward_number.exp)
elif forward_rounding=="stochastic":
if type(forward_number)==BlockFloatingPoint:
forward_quant = lambda x, quant_module: quant_module.block_quantize_stochastic(x, forward_number.wl, forward_number.dim)
elif type(forward_number)==FixedPoint:
forward_quant = lambda x, quant_module: quant_module.fixed_point_quantize_stochastic(x, forward_number.wl, forward_number.fl,
forward_number.clamp, forward_number.symmetric)
elif type(forward_number)==FloatingPoint:
forward_quant = lambda x, quant_module: quant_module.float_quantize_stochastic(x, forward_number.man, forward_number.exp)
else:
if type(forward_number)==FixedPoint or forward_number==None:
assert forward_number==None or forward_number.clamp == True, "must use clamping if zeroing out clamped gradient"
if forward_rounding=="nearest":
forward_quant = lambda x, quant_module: quant_module.fixed_point_quantize_nearest_mask(x, forward_number.wl, forward_number.fl, forward_number.symmetric)
elif forward_rounding=="stochastic":
forward_quant = lambda x, quant_module: quant_module.fixed_point_quantize_stochastic_mask(x, forward_number.wl, forward_number.fl, forward_number.symmetric)
else:
raise ValueError("zeroing clamping gradient only support fixed point.")
if backward_rounding=="nearest":
if type(backward_number)==BlockFloatingPoint:
backward_quant = lambda a, quant_module: quant_module.block_quantize_nearest(a, backward_number.wl, backward_number.dim)
elif type(backward_number)==FixedPoint:
backward_quant = lambda a, quant_module: quant_module.fixed_point_quantize_nearest(a, backward_number.wl, backward_number.fl,
backward_number.clamp, backward_number.symmetric)
elif type(backward_number)==FloatingPoint:
backward_quant = lambda a, quant_module: quant_module.float_quantize_nearest(a, backward_number.man, backward_number.exp)
elif backward_rounding=="stochastic":
if type(backward_number)==BlockFloatingPoint:
backward_quant = lambda a, quant_module: quant_module.block_quantize_stochastic(a, backward_number.wl, backward_number.dim)
elif type(backward_number)==FixedPoint:
backward_quant = lambda a, quant_module: quant_module.fixed_point_quantize_stochastic(a, backward_number.wl, backward_number.fl,
backward_number.clamp, backward_number.symmetric)
elif type(backward_number)==FloatingPoint:
backward_quant = lambda a, quant_module: quant_module.float_quantize_stochastic(a, backward_number.man, backward_number.exp)
if clamping_grad_zero == False:
class Rounding(torch.autograd.Function):
@staticmethod
def forward(self, x):
if forward_number==None: return x
quant_module = get_module(x)
out = forward_quant(x.contiguous(), quant_module)
return out
@staticmethod
def backward(self, grad_output):
if self.needs_input_grad[0]:
if backward_number == None:
grad_input = grad_output
else:
quant_module = get_module(grad_output)
grad_input = backward_quant(grad_output.contiguous(), quant_module)
else:
grad_input = None
return grad_input
else:
class Rounding(torch.autograd.Function):
@staticmethod
def forward(self, x):
if forward_number==None:
self.mask = torch.zeros_like(x).byte()
return x
else:
quant_module = get_module(x)
out, mask = forward_quant(x.contiguous(), quant_module)
self.mask = mask
return out
@staticmethod
def backward(self, grad_output):
if self.needs_input_grad[0]:
if backward_number == None:
grad_input = grad_output
else:
quant_module = get_module(grad_output)
# grad_output = grad_output.contiguous().masked_fill_(self.mask, 0)
for f in backward_hooks:
grad_output = f(grad_output)
grad_input = backward_quant(grad_output.contiguous(), quant_module).masked_fill(self.mask, 0)
else:
grad_input = None
return grad_input
return Rounding.apply
def fixed_point_quantize(x, wl, fl, clamp=True, symmetric=False, rounding="stochastic"):
"""
Quantize a single precision Floating Point into low-precision Fixed Point
Args:
- :param: `x` (torch.Tensor) : the single precision number to be quantized
- :param: `wl` (int) : word length of the fixed point number being simulated
- :param: `fl` (int) : fractional length of the fixed point number being simulated
- :param: `clamp` (bool, optional) : clamp input numbers into representable range. if false,
the quantization will only simulate the effect on precision
- :param: `symmetric` (bool, optional) : discard the minimum representable number to make the representable
range symmetric
- :param: `rounding` (string) : rounding mode, \"stochastic\" or \"nearest\" (default: \"stochastic\")
Returns:
- a quantized low-precision block floating point number (torch.Tensor)
"""
assert isinstance(x, torch.Tensor)
assert rounding in ["stochastic", "nearest"]
assert_wl_fl(wl, fl)
quant_module = get_module(x)
if rounding == "nearest":
out = quant_module.fixed_point_quantize_nearest(x.contiguous(), wl, fl, clamp, symmetric)
elif rounding == "stochastic":
out = quant_module.fixed_point_quantize_stochastic(x.contiguous(), wl, fl, clamp, symmetric)
return out
def block_quantize(x, wl, dim=-1, rounding="stochastic"):
"""
Quantize a single precision Floating Point into low-precision Block Floating Point
Args:
- :param: `x` (torch.Tensor) : the single precision number to be quantized
- :param: `wl` (int) : word length of the block floating point number being simulated
- :param: `rounding` (string) : rounding mode, \"stochastic\" or \"nearest\"
Returns:
- a quantized low-precision block floating point number (torch.Tensor)
"""
assert isinstance(x, torch.Tensor), "x is not a single precision Floating Point Tensor"
assert rounding in ["stochastic", "nearest"], "invalid rounding mode, {}".format(rounding)
quant_module = get_module(x)
if rounding=="nearest":
out = quant_module.block_quantize_nearest(x.contiguous(), wl, dim)
elif rounding=="stochastic":
out = quant_module.block_quantize_stochastic(x.contiguous(), wl, dim)
return out
def float_quantize(x, exp, man, rounding="stochastic"):
"""
Quantize a single precision Floating Point into low-precision Floating Point
Args:
- :attr: `x` (torch.Tensor) : the single precision number(torch.Tensor) to be quantized
- :attr: `exp` (int) : number of bits allocated for exponent
- :attr: `man` (int) : number of bits allocated for mantissa, not counting the virtual bit
- :attr: `rounding` (string) : rounding mode, \"stochastic\" or \"nearest\"
Returns:
- a quantized low-precision floating point number (torch.Tensor)
"""
assert isinstance(x, torch.Tensor), "x is not a single precision Floating Point Tensor"
assert rounding in ["stochastic", "nearest"], "invalid rounding mode, {}".format(rounding)
quant_module = get_module(x)
if rounding=="nearest":
out = quant_module.float_quantize_nearest(x.contiguous(), man, exp)
elif rounding=="stochastic":
out = quant_module.float_quantize_stochastic(x.contiguous(), man, exp)
return out
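# Usage sketch (not part of the module): quantize a tensor with the entry points
# defined above. The function signatures match the definitions in this file; the
# FloatingPoint(exp=..., man=...) keyword names are assumed from how forward_number
# is used above, and the tensor is arbitrary test data.
x = torch.randn(4, 4)
x_fix = fixed_point_quantize(x, wl=8, fl=4, rounding="nearest")
x_flt = float_quantize(x, exp=5, man=2, rounding="stochastic")
quant = quantizer(forward_number=FloatingPoint(exp=5, man=2),
                  forward_rounding="nearest")
y = quant(x)  # backward pass is an identity mapping since backward_number is None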
| 1,851
| 280
| 106
|
37098085de756a93d7f4495599b694d1f17420e3
| 797
|
py
|
Python
|
pylmod/tests/common.py
|
mitodl/PyLmod
|
c028ceca631c5e1032b23a4d1a8cde17450df252
|
[
"BSD-2-Clause"
] | 4
|
2015-09-08T18:58:10.000Z
|
2018-03-06T01:14:46.000Z
|
pylmod/tests/common.py
|
mitodl/PyLmod
|
c028ceca631c5e1032b23a4d1a8cde17450df252
|
[
"BSD-2-Clause"
] | 73
|
2015-02-12T14:44:50.000Z
|
2022-01-19T00:05:44.000Z
|
pylmod/tests/common.py
|
mitodl/PyLmod
|
c028ceca631c5e1032b23a4d1a8cde17450df252
|
[
"BSD-2-Clause"
] | 2
|
2015-04-30T01:19:23.000Z
|
2015-08-07T07:30:08.000Z
|
"""
Base class and common constants needed for pylmod tests
"""
import os
from unittest import TestCase
class BaseTest(TestCase):
"""
Base class with convenient constants and URL endpoints for pylmod testing.
"""
# This should be removed if we end up with common methods, but for
# now they are just common attributes.
# pylint: disable=too-few-public-methods
DATA_ROOT = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'data'
)
CERT = os.path.join(DATA_ROOT, 'certs', 'test_cert.pem')
URLBASE = 'https://testingstuff/'
GRADEBOOK_REGISTER_BASE = URLBASE + 'service/gradebook/'
MEMBERSHIP_REGISTER_BASE = URLBASE + 'service/membership/'
GBUUID = 'STELLAR:/project/testingstuff'
CUUID = '/project/testingstuff'
| 28.464286
| 78
| 0.690088
|
"""
Base class and common constants needed for pylmod tests
"""
import os
from unittest import TestCase
class BaseTest(TestCase):
"""
Base class with convenient constants and URL endpoints for pylmod testing.
"""
# This should be removed if we end up with common methods, but for
# now they are just common attributes.
# pylint: disable=too-few-public-methods
DATA_ROOT = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'data'
)
CERT = os.path.join(DATA_ROOT, 'certs', 'test_cert.pem')
URLBASE = 'https://testingstuff/'
GRADEBOOK_REGISTER_BASE = URLBASE + 'service/gradebook/'
MEMBERSHIP_REGISTER_BASE = URLBASE + 'service/membership/'
GBUUID = 'STELLAR:/project/testingstuff'
CUUID = '/project/testingstuff'
| 0
| 0
| 0
|
f3d331fbd5da98d5b101c7b988080ae9e54c7c8e
| 464
|
py
|
Python
|
tests/proc_006.py
|
meisterluk/taptaptap3
|
6a377ae3f6ffd92a983a7f809132c9de20ed0d76
|
[
"BSD-3-Clause"
] | null | null | null |
tests/proc_006.py
|
meisterluk/taptaptap3
|
6a377ae3f6ffd92a983a7f809132c9de20ed0d76
|
[
"BSD-3-Clause"
] | 2
|
2019-09-26T13:48:11.000Z
|
2019-09-30T21:30:12.000Z
|
tests/proc_006.py
|
meisterluk/taptaptap3
|
6a377ae3f6ffd92a983a7f809132c9de20ed0d76
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
from taptaptap3.proc import plan, ok, out
plan(tests=10)
ok("Starting the program")
ok("Starting the engine")
ok("Find the object")
ok("Transport object to target")
ok("Check for existing fire")
ok("Place it beneath the desk")
ok("Search for fire extinguisher")
ok("Extinguish fire")
ok("Put fire extinguisher back")
ok("Terminate")
out()
## validity: 0
## ok testcases: 10 / 10
## bailout: no
## stderr: Find the object
| 19.333333
| 41
| 0.685345
|
#!/usr/bin/env python3
from taptaptap3.proc import plan, ok, out
plan(tests=10)
ok("Starting the program")
ok("Starting the engine")
ok("Find the object")
ok("Transport object to target")
ok("Check for existing fire")
ok("Place it beneath the desk")
ok("Search for fire extinguisher")
ok("Extinguish fire")
ok("Put fire extinguisher back")
ok("Terminate")
out()
## validity: 0
## ok testcases: 10 / 10
## bailout: no
## stderr: Find the object
| 0
| 0
| 0
|
8dad65aff74ff1e8d6b5e537733be0341ed93db9
| 784
|
py
|
Python
|
tests/test_nag_max_min.py
|
daviddoret/pyxag
|
6884c7e100d28c3ce6273248caa40eaeab920bc5
|
[
"MIT"
] | 1
|
2019-10-27T15:56:27.000Z
|
2019-10-27T15:56:27.000Z
|
tests/test_nag_max_min.py
|
daviddoret/pynag
|
6884c7e100d28c3ce6273248caa40eaeab920bc5
|
[
"MIT"
] | 11
|
2019-11-04T18:21:16.000Z
|
2019-11-07T03:22:41.000Z
|
tests/test_nag_max_min.py
|
daviddoret/pynag
|
6884c7e100d28c3ce6273248caa40eaeab920bc5
|
[
"MIT"
] | null | null | null |
import unittest
from classes.nag import Nag
if __name__ == '__main__':
unittest.main()
| 35.636364
| 73
| 0.628827
|
import unittest
from classes.nag import Nag
class TestNagMaxMin(unittest.TestCase):
def test_more_complex_1(self):
nag = Nag('Test')
nag.set_nand('i1', 'i1', 'n1')
nag.set_nand('i1', 'i1', 'n2')
nag.set_nand('n1', 'n2', 'n3')
nag.set_nand('i1', 'n3', 'n4')
nag.set_nand('i1', 'n3', 'n5')
nag.set_output('n4', 'o1')
nag.set_output('n5', 'o2')
nag.set_execution_mode()
self.assertEqual(nag.get_vertex_predecessors_min_number('n3'), 1)
self.assertEqual(nag.get_vertex_predecessors_max_number('n3'), 2)
self.assertEqual(nag.get_vertex_successors_min_number('n3'), 4)
self.assertEqual(nag.get_vertex_successors_max_number('n3'), 5)
if __name__ == '__main__':
unittest.main()
| 625
| 18
| 50
|
e06289fab530f54b3cac133e2744cd2572babf70
| 6,475
|
py
|
Python
|
tests/test_extract_features.py
|
stjordanis/vissl
|
8800989f9bf073693b777c14ea01b585d4b306d6
|
[
"MIT"
] | 1
|
2021-05-03T18:52:32.000Z
|
2021-05-03T18:52:32.000Z
|
tests/test_extract_features.py
|
pzharrington/vissl
|
b647c256447af7ea66655811849be1f642377db8
|
[
"MIT"
] | null | null | null |
tests/test_extract_features.py
|
pzharrington/vissl
|
b647c256447af7ea66655811849be1f642377db8
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
import torch
from hydra.experimental import compose, initialize_config_module
from vissl.utils.hydra_config import convert_to_attrdict
from vissl.utils.misc import merge_features
from vissl.utils.test_utils import (
gpu_test,
in_temporary_directory,
run_integration_test,
)
| 49.05303
| 88
| 0.592432
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
import torch
from hydra.experimental import compose, initialize_config_module
from vissl.utils.hydra_config import convert_to_attrdict
from vissl.utils.misc import merge_features
from vissl.utils.test_utils import (
gpu_test,
in_temporary_directory,
run_integration_test,
)
class TestExtractClusterWorkflow(unittest.TestCase):
@staticmethod
def _create_pretraining_config(num_gpu: int = 2):
with initialize_config_module(config_module="vissl.config"):
cfg = compose(
"defaults",
overrides=[
"config=test/integration_test/quick_swav",
"config.DATA.TRAIN.DATA_SOURCES=[synthetic]",
"config.DATA.TRAIN.DATA_LIMIT=40",
"config.SEED_VALUE=0",
"config.MODEL.AMP_PARAMS.USE_AMP=False",
"config.MODEL.SYNC_BN_CONFIG.CONVERT_BN_TO_SYNC_BN=True",
"config.MODEL.SYNC_BN_CONFIG.SYNC_BN_TYPE=pytorch",
"config.MODEL.AMP_PARAMS.AMP_TYPE=pytorch",
"config.LOSS.swav_loss.epsilon=0.03",
"config.MODEL.FSDP_CONFIG.flatten_parameters=True",
"config.MODEL.FSDP_CONFIG.mixed_precision=False",
"config.MODEL.FSDP_CONFIG.fp32_reduce_scatter=False",
"config.MODEL.FSDP_CONFIG.compute_dtype=float32",
f"config.DISTRIBUTED.NUM_PROC_PER_NODE={num_gpu}",
"config.LOG_FREQUENCY=1",
"config.OPTIMIZER.construct_single_param_group_only=True",
"config.DATA.TRAIN.BATCHSIZE_PER_REPLICA=4",
"config.OPTIMIZER.use_larc=False",
],
)
args, config = convert_to_attrdict(cfg)
return config
@staticmethod
def _create_extract_features_config(checkpoint_path: str, num_gpu: int = 2):
with initialize_config_module(config_module="vissl.config"):
cfg = compose(
"defaults",
overrides=[
"config=feature_extraction/extract_resnet_in1k_8gpu",
"+config/feature_extraction/with_head=rn50_swav",
f"config.MODEL.WEIGHTS_INIT.PARAMS_FILE={checkpoint_path}",
"config.DATA.TRAIN.DATA_SOURCES=[synthetic]",
"config.DATA.TRAIN.LABEL_SOURCES=[synthetic]",
"config.DATA.TEST.DATA_SOURCES=[synthetic]",
"config.DATA.TEST.LABEL_SOURCES=[synthetic]",
"config.DATA.TRAIN.DATA_LIMIT=40",
"config.DATA.TEST.DATA_LIMIT=20",
"config.SEED_VALUE=0",
"config.MODEL.AMP_PARAMS.USE_AMP=False",
"config.MODEL.SYNC_BN_CONFIG.CONVERT_BN_TO_SYNC_BN=True",
"config.MODEL.SYNC_BN_CONFIG.SYNC_BN_TYPE=pytorch",
"config.MODEL.AMP_PARAMS.AMP_TYPE=pytorch",
"config.LOSS.swav_loss.epsilon=0.03",
"config.MODEL.FSDP_CONFIG.flatten_parameters=True",
"config.MODEL.FSDP_CONFIG.mixed_precision=False",
"config.MODEL.FSDP_CONFIG.fp32_reduce_scatter=False",
"config.MODEL.FSDP_CONFIG.compute_dtype=float32",
f"config.DISTRIBUTED.NUM_PROC_PER_NODE={num_gpu}",
"config.LOG_FREQUENCY=1",
"config.OPTIMIZER.construct_single_param_group_only=True",
"config.DATA.TRAIN.BATCHSIZE_PER_REPLICA=4",
"config.DATA.TEST.BATCHSIZE_PER_REPLICA=2",
"config.OPTIMIZER.use_larc=False",
],
)
args, config = convert_to_attrdict(cfg)
return config
@gpu_test(gpu_count=2)
def test_extract_cluster_assignment_ddp(self):
with in_temporary_directory() as pretrain_dir:
# Run a pre-training to have some weights to begin with
pretrain_config = self._create_pretraining_config()
run_integration_test(pretrain_config)
# Create a directory to contain the extracted features
with in_temporary_directory() as extract_dir:
# Run the extract engine in a separate directory to check that
# it is correctly able to output the features in another dir
with in_temporary_directory():
extract_config = self._create_extract_features_config(
checkpoint_path=os.path.join(pretrain_dir, "checkpoint.torch")
)
extract_config.EXTRACT_FEATURES.OUTPUT_DIR = extract_dir
run_integration_test(extract_config, engine_name="extract_features")
# Check the content of the directory containing the extracted features
folder_content = os.listdir(extract_dir)
print(folder_content)
for rank in [0, 1]:
for chunk in range(5):
for file in [
f"rank{rank}_chunk{chunk}_train_heads_features.npy",
f"rank{rank}_chunk{chunk}_train_heads_inds.npy",
f"rank{rank}_chunk{chunk}_train_heads_targets.npy",
]:
self.assertIn(file, folder_content)
# Verify that we can merge the features back (train split)
train_feat = merge_features(extract_dir, "train", "heads")
print(train_feat)
self.assertEqual(train_feat["features"].shape, torch.Size([40, 128]))
self.assertEqual(train_feat["targets"].shape, torch.Size([40, 1]))
self.assertEqual(train_feat["inds"].shape, torch.Size([40]))
# Verify that we can merge the features back (test split)
test_feat = merge_features(extract_dir, "test", "heads")
self.assertEqual(test_feat["features"].shape, torch.Size([20, 128]))
self.assertEqual(test_feat["targets"].shape, torch.Size([20, 1]))
self.assertEqual(test_feat["inds"].shape, torch.Size([20]))
| 5,786
| 174
| 23
|
1789f865fff7494de4353119c0d168a977ab7de7
| 2,178
|
py
|
Python
|
lab4/src/lab4_main.py
|
DemerzelSun12/HIT-2020-Fall-Machine-Learning-Labs
|
294331af274e30caab7e8c5b8f910a1654b0f54f
|
[
"MIT"
] | null | null | null |
lab4/src/lab4_main.py
|
DemerzelSun12/HIT-2020-Fall-Machine-Learning-Labs
|
294331af274e30caab7e8c5b8f910a1654b0f54f
|
[
"MIT"
] | null | null | null |
lab4/src/lab4_main.py
|
DemerzelSun12/HIT-2020-Fall-Machine-Learning-Labs
|
294331af274e30caab7e8c5b8f910a1654b0f54f
|
[
"MIT"
] | null | null | null |
from src.PCA import *
from src.generate_data import *
from src.generate_picture import *
from src.dimensionality_reduction_image import *
if __name__ == '__main__':
main()
| 29.432432
| 80
| 0.667126
|
from src.PCA import *
from src.generate_data import *
from src.generate_picture import *
from src.dimensionality_reduction_image import *
def test_pca(data):
generate_3_dimension_picture(data)
central_data, eig_vector, data_mean = PCA(data, 2).pca()
pca_data = np.dot(central_data, eig_vector)
generate_2_dimension_picture(pca_data)
def test_image_data_set():
data = read_image_data()
image_number, image_feature = data[0].shape
print(data.shape)
central_data = []
eig_vector = []
data_mean = []
pca_data = []
rebuild_data = []
for i in range(len(data)):
central_data_i, eig_vector_i, data_mean_i = PCA(data[i], 8).pca()
central_data.append(central_data_i)
eig_vector.append(eig_vector_i)
data_mean.append(data_mean_i)
print(eig_vector[i])
eig_vector_i = np.real(eig_vector_i)
pca_data.append(np.dot(central_data_i, eig_vector_i))
# print(pca_data)
rebuild_data.append(np.dot(pca_data[i], eig_vector[i].T) + data_mean[i])
plt.figure(figsize=(50, 50))
for i in range(len(data)):
plt.subplot(3, 3, i + 1)
plt.imshow(rebuild_data[i], cmap=plt.cm.gray)
plt.show()
print("the signal to noise ratio of the image after PCA:")
for i in range(len(data)):
ratio = calculate_noise_ratio(data[i], rebuild_data[i])
print('The noise ratio of image ' + str(i) + ' is ' + str(ratio))
def test_single_picture():
data = read_image_data()
image_number, image_feature = data.shape
print(data.shape)
central_data, eig_vector, data_mean = PCA(data, 20).pca()
print(eig_vector)
eig_vector = np.real(eig_vector)
pca_data = np.dot(central_data, eig_vector)
rebuild_data = np.dot(pca_data, eig_vector.T) + data_mean
plt.figure(figsize=(50, 50))
plt.imshow(rebuild_data)
plt.show()
def main():
data_1 = generate_data(1000, 0, 100)
# print(np.shape(data_1))
data_2 = generate_data(2000, 0, 10)
data_3 = generate_data(2000, 1, 10)
test_pca(data_1)
test_pca(data_2)
test_pca(data_3)
test_image_data_set()
if __name__ == '__main__':
main()
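# Compact numpy-only sketch of the projection/reconstruction pattern used above
# (an illustration, not the src.PCA implementation): center the data, take the
# top-k eigenvectors of the covariance matrix, project, then rebuild.
import numpy as np
def pca_project(data, k):
    mean = data.mean(axis=0)
    centered = data - mean
    cov = np.cov(centered, rowvar=False)
    eigvals, eigvecs = np.linalg.eigh(cov)           # eigenvalues in ascending order
    top = eigvecs[:, np.argsort(eigvals)[::-1][:k]]  # top-k principal directions
    projected = centered @ top
    rebuilt = projected @ top.T + mean
    return projected, rebuilt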
| 1,904
| 0
| 92
|
9eeabc98a8cf3c977f1c9739fce826a98ead06a5
| 10,652
|
py
|
Python
|
src/scripts/collect_pose_data_pene_no_wall_big.py
|
yifan-you-37/omnihang
|
c80b699b2cf2cf3422201cc8c3fa572d0e01d5a2
|
[
"MIT"
] | 1
|
2022-01-16T20:24:09.000Z
|
2022-01-16T20:24:09.000Z
|
src/scripts/collect_pose_data_pene_no_wall_big.py
|
yifan-you-37/omnihang
|
c80b699b2cf2cf3422201cc8c3fa572d0e01d5a2
|
[
"MIT"
] | null | null | null |
src/scripts/collect_pose_data_pene_no_wall_big.py
|
yifan-you-37/omnihang
|
c80b699b2cf2cf3422201cc8c3fa572d0e01d5a2
|
[
"MIT"
] | 1
|
2022-03-16T03:14:37.000Z
|
2022-03-16T03:14:37.000Z
|
import pybullet
import time
import numpy as np
import random
# np.random.seed(5)
# random.seed(5)
import sys
import os
import argparse
import csv
from scipy.spatial.transform import Rotation
from collect_pose_data import PoseDataCollector
sys.path.insert(1, '../utils/')
from coord_helper import *
from data_helper import *
from collision_helper import *
import bullet_client as bc
sys.path.insert(1, '../lin_my/')
from classifier_dataset_torch import ClassifierDataset
from scipy.spatial import KDTree
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--home_dir_data", default="../data")
parser.add_argument("--hook_name", default='')
parser.add_argument("--sherlock", action='store_true')
parser.add_argument("--obj_cat_split_id", type=int, default=-1)
args = parser.parse_args()
obj_cat_split_id = int(args.obj_cat_split_id)
if args.sherlock:
args.home_dir_data = '/scratch/groups/bohg/hang'
assert args.hook_name != ''
assert obj_cat_split_id >= 0
data_dir = os.path.join(args.home_dir_data, 'geo_data')
labels_folder_dir = os.path.join(args.home_dir_data, 'geo_data/labels/')
exclude_dir = os.path.join(args.home_dir_data, 'exclude')
pos_collection_result_folder_dir = os.path.join(args.home_dir_data, 'collection_result_pene_big_pos_new')
collection_result_folder_dir = os.path.join(args.home_dir_data, 'collection_result')
neg_collection_result_folder_dir = os.path.join(args.home_dir_data, 'collection_result_pene_big_neg_new')
pos_labels_dir = os.path.join(pos_collection_result_folder_dir, 'labels')
neg_labels_dir = os.path.join(neg_collection_result_folder_dir, 'labels')
mkdir_if_not(pos_collection_result_folder_dir)
mkdir_if_not(neg_collection_result_folder_dir)
mkdir_if_not(pos_labels_dir)
mkdir_if_not(neg_labels_dir)
all_hook_name, all_hook_urdf, all_object_name, all_object_urdf = load_all_hooks_object_w_split_id(obj_cat_split_id, data_dir, exclude_dir, labels_folder_dir, True, True, with_wall=False)
p_id = bc.BulletClient(connection_mode=pybullet.DIRECT)
cp_result_folder_dir = os.path.join(args.home_dir_data, 'dataset_cp')
train_list_dir = os.path.join(cp_result_folder_dir, 'labels', 'train_list.txt')
test_list_dir = os.path.join(cp_result_folder_dir, 'labels', 'test_list.txt')
train_set = ClassifierDataset(args.home_dir_data, train_list_dir, False, split='train', with_wall=False, one_per_pair=True)
test_set = ClassifierDataset(args.home_dir_data, test_list_dir, False, split='test', with_wall=False, one_per_pair=True)
if not os.path.exists(neg_collection_result_folder_dir):
os.mkdir(neg_collection_result_folder_dir)
collector = PeneDataCollector(p_id)
ct = 0
print('result file names', len(train_set.all_result_file_names), len(test_set.all_result_file_names))
for i, hook_name in enumerate(all_hook_name):
if args.hook_name != '' and args.hook_name != hook_name:
continue
out_pos_labels_dir = os.path.join(pos_labels_dir, '{}.txt'.format(hook_name))
out_neg_labels_dir = os.path.join(neg_labels_dir, '{}.txt'.format(hook_name))
# if os.path.exists(out_pos_labels_dir) and os.path.exists(out_neg_labels_dir):
# print('skip', hook_name)
# continue
hook_urdf = all_hook_urdf[i]
hook_bullet_id, hook_scaling = collector.init_hook(hook_urdf)
hook_world_pos_offset = get_hook_wall_offset(hook_urdf)
hook_pc_dir = get_numpy_dir_from_urdf(hook_urdf)
hook_world_pos = collector.get_hook_world_pos(hook_bullet_id, hook_world_pos_offset)
hook_pc = np.load(hook_pc_dir)
hook_tree = KDTree(hook_pc[:, :3], leafsize=1000)
num_pos_dict = {}
num_neg_dict = {}
for j, object_name in enumerate(all_object_name):
# if not 'daily_object' in object_name:
# continue
result_file_name = hook_name + '_' + object_name
if (not result_file_name in train_set.all_result_file_names) \
and (not result_file_name in test_set.all_result_file_names):
continue
object_urdf = all_object_urdf[j]
object_pc_dir = get_numpy_dir_from_urdf(object_urdf)
object_pc = np.load(object_pc_dir)
print(result_file_name)
neg_out_dir = os.path.join(neg_collection_result_folder_dir, result_file_name + '.txt')
pos_out_dir = os.path.join(pos_collection_result_folder_dir, result_file_name + '.txt')
# result_dir = os.path.join(collection_result_folder_dir, result_file_name+ '.txt')
# if not os.path.isfile(result_dir):
# continue
# result_file_poses = load_result_file(result_dir)
# if result_file_poses.shape[0] == 0:
# continue
# if os.path.isfile(out_dir):
# continue
ct += 1
object_bullet_id = collector.p.loadURDF(object_urdf, basePosition=[0, 0, 2], baseOrientation=[0, 0, 0, 1], globalScaling=1, useFixedBase=False)
object_scaling = collector.p.getCollisionShapeData(object_bullet_id, -1)[0][3][0]
pos_result_arr, neg_result_arr = collector.collect_pene_data_one_hook_object(hook_bullet_id, object_bullet_id, hook_urdf, object_urdf, hook_scaling, object_scaling, hook_world_pos,
hook_pc, object_pc, hook_tree, None)
num_pos_dict[result_file_name] = len(pos_result_arr)
num_neg_dict[result_file_name] = len(neg_result_arr)
print(len(pos_result_arr), len(neg_result_arr), result_file_name)
with open(pos_out_dir, 'w+') as f:
for result in pos_result_arr:
f.write(comma_separated(result) + '\n')
with open(neg_out_dir, 'w+') as f:
for result in neg_result_arr:
f.write(comma_separated(result) + '\n')
# print(pos_out_dir, neg_out_dir)
collector.p.removeBody(object_bullet_id)
if (ct + 1) % 30 == 0:
print('reset')
collector.p.disconnect()
p_id = bc.BulletClient(connection_mode=pybullet.DIRECT)
collector = PeneDataCollector(p_id)
hook_bullet_id, hook_scaling = collector.init_hook(hook_urdf)
out_pos_labels_dir = os.path.join(pos_labels_dir, '{}.txt'.format(hook_name))
out_neg_labels_dir = os.path.join(neg_labels_dir, '{}.txt'.format(hook_name))
collector.p.removeBody(hook_bullet_id)
dict_to_csv(out_pos_labels_dir, num_pos_dict)
dict_to_csv(out_neg_labels_dir, num_neg_dict)
# for j in range(20000):
# collector.p.stepSimulation()
# time.sleep(1./240.)
| 39.306273
| 187
| 0.747747
|
import pybullet
import time
import numpy as np
import random
# np.random.seed(5)
# random.seed(5)
import sys
import os
import argparse
import csv
from scipy.spatial.transform import Rotation
from collect_pose_data import PoseDataCollector
sys.path.insert(1, '../utils/')
from coord_helper import *
from data_helper import *
from collision_helper import *
import bullet_client as bc
sys.path.insert(1, '../lin_my/')
from classifier_dataset_torch import ClassifierDataset
def my_sample_points_in_bb_uniform(n, bb_low, bb_high):
# n_dim = int(math.ceil(n**(1./3)))
sample_x = np.random.uniform(bb_low[0], bb_high[0], size=n)
sample_y = np.random.uniform(bb_low[1], bb_high[1], size=n)
sample_z = np.random.uniform(bb_low[2], bb_high[2], size=n)
# return cartesian_product(sample_x, sample_y, sample_z)
# print(np.stack([sample_x, sample_y, sample_z], axis=1).shape)
return np.stack([sample_x, sample_y, sample_z], axis=1)
def sample_points_hook_for_collide(hook_bb, object_bb, bb_extra_dist=0.1, p=None):
bb_upper = np.max(hook_bb,axis=0)
bb_lower = np.min(hook_bb,axis=0)
object_xx = object_bb[1][0] - object_bb[0][0]
object_yy = object_bb[1][1] - object_bb[0][1]
object_zz = object_bb[1][2] - object_bb[0][2]
object_xx = object_yy = object_zz = max(object_xx, object_yy, object_zz)
bb_extra_dist = object_xx / 1.5
bb_upper += bb_extra_dist
# bb_upper[1] += 0.3
# bb_upper[2] += 0.3
bb_lower -= bb_extra_dist
# bb_upper[2] += 0.2
# p.addUserDebugLine(bb_lower,bb_upper, lineWidth=19)
# uniform_points = sample_points_in_sphere_uniform(1000, center=(bb_upper + bb_lower) / 2, radius=np.linalg.norm(bb_upper - bb_lower)/2.)
uniform_points = my_sample_points_in_bb_uniform(int(1000), bb_lower, bb_upper)
# drawAABB([np.min(uniform_points, axis=0), np.max(uniform_points, axis=0)], p)
# input('bb1')
# drawAABB([bb_lower, bb_upper], p)
# input('bb2')
# filter_mask = filter_inside_bb(uniform_points, bb_lower, bb_upper)
# uniform_points = uniform_points[filter_mask]
return uniform_points
def dict_to_csv(out_dir, all_data):
print('writing', out_dir)
w = csv.writer(open(out_dir, "w+"))
for key, val in all_data.items():
w.writerow([key, val])
class PeneDataCollector(PoseDataCollector):
def __init__(self, p_id):
super(PeneDataCollector, self).__init__(p_id)
def collect_pene_data_one_hook_object(self, hook_bullet_id, object_bullet_id, hook_urdf, object_urdf, hook_scaling, object_scaling, hook_world_pos,
hook_pc_n, object_pc_n, hook_tree, result_file_poses):
try:
hook_bb = self.p.getAABB(hook_bullet_id, 0)
except:
hook_bb = self.p.getAABB(hook_bullet_id, -1)
object_bb = self.p.getAABB(object_bullet_id)
ox, oy, oz = object_bb[1][0] - object_bb[0][0], object_bb[1][1] - object_bb[0][1], object_bb[1][2] - object_bb[0][2]
# print('ox', ox, oy, oz)
potential_pos_world = sample_points_hook_for_collide(hook_bb, object_bb, np.max([ox, oy, oz]), self.p, )
potential_quat = sample_quat_uniform(potential_pos_world.shape[0])
ct = 0
pos_result_arr = []
neg_result_arr = []
for i in range(potential_pos_world.shape[0]):
# pose = result_file_poses[0][-7:]
object_pos_local = potential_pos_world[i] - hook_world_pos
object_quat_local = potential_quat[i]
# object_pos_local = pose[:3]
# object_quat_local = pose[3:]
# hook wall 3 bag 2
# pose = np.array([0.5605147, -0.00370588, 0.9306318, -0.74062765, -0.28224521, 0.26840015, 0.54751227])
self.p.resetBasePositionAndOrientation(object_bullet_id, potential_pos_world[i], potential_quat[i])
# self.p.resetBasePositionAndOrientation(object_bullet_id, pose[:3] + hook_world_pos, pose[3:])
if self.check_object_touches_ground(object_bullet_id):
continue
# use getContactPoint
# self.p.stepSimulation()
# for tmp in self.p.getContactPoints(hook_bullet_id, object_bullet_id):
# if tmp[8] < -0.001:
# pene = True
# break
# object_pos_local, object_quat_local = self.p.getBasePositionAndOrientation(object_bullet_id)
# object_pos_local -= hook_world_pos
# use getClosesetPoint
# close_points = self.p.getClosestPoints(hook_bullet_id, object_bullet_id, distance=0.01)
# if len(close_points) == 0:
# continue
# for tmp in close_points:
# if tmp[8] < -0.001:
# pene = True
# break
pene_dist = fcl_get_dist(hook_urdf, object_urdf, pose_transl=object_pos_local, pose_quat=object_quat_local, urdf=True)
if pene_dist > 0.02:
continue
pene = bool(pene_dist == 0)
if pene:
# if len(pos_result_arr) < 20:
pos_result_arr.append([
hook_scaling, object_scaling, *object_pos_local, *object_quat_local
])
elif not pene:
# if len(neg_result_arr) < 20:
neg_result_arr.append([
hook_scaling, object_scaling, *object_pos_local, *object_quat_local
])
# if len(pos_result_arr) > 5 and len(neg_result_arr) > 5:
# break
# if ct >= 10:
# break
return pos_result_arr, neg_result_arr
from scipy.spatial import KDTree
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--home_dir_data", default="../data")
parser.add_argument("--hook_name", default='')
parser.add_argument("--sherlock", action='store_true')
parser.add_argument("--obj_cat_split_id", type=int, default=-1)
args = parser.parse_args()
obj_cat_split_id = int(args.obj_cat_split_id)
if args.sherlock:
args.home_dir_data = '/scratch/groups/bohg/hang'
assert args.hook_name != ''
assert obj_cat_split_id >= 0
data_dir = os.path.join(args.home_dir_data, 'geo_data')
labels_folder_dir = os.path.join(args.home_dir_data, 'geo_data/labels/')
exclude_dir = os.path.join(args.home_dir_data, 'exclude')
pos_collection_result_folder_dir = os.path.join(args.home_dir_data, 'collection_result_pene_big_pos_new')
collection_result_folder_dir = os.path.join(args.home_dir_data, 'collection_result')
neg_collection_result_folder_dir = os.path.join(args.home_dir_data, 'collection_result_pene_big_neg_new')
pos_labels_dir = os.path.join(pos_collection_result_folder_dir, 'labels')
neg_labels_dir = os.path.join(neg_collection_result_folder_dir, 'labels')
mkdir_if_not(pos_collection_result_folder_dir)
mkdir_if_not(neg_collection_result_folder_dir)
mkdir_if_not(pos_labels_dir)
mkdir_if_not(neg_labels_dir)
all_hook_name, all_hook_urdf, all_object_name, all_object_urdf = load_all_hooks_object_w_split_id(obj_cat_split_id, data_dir, exclude_dir, labels_folder_dir, True, True, with_wall=False)
p_id = bc.BulletClient(connection_mode=pybullet.DIRECT)
cp_result_folder_dir = os.path.join(args.home_dir_data, 'dataset_cp')
train_list_dir = os.path.join(cp_result_folder_dir, 'labels', 'train_list.txt')
test_list_dir = os.path.join(cp_result_folder_dir, 'labels', 'test_list.txt')
train_set = ClassifierDataset(args.home_dir_data, train_list_dir, False, split='train', with_wall=False, one_per_pair=True)
test_set = ClassifierDataset(args.home_dir_data, test_list_dir, False, split='test', with_wall=False, one_per_pair=True)
if not os.path.exists(neg_collection_result_folder_dir):
os.mkdir(neg_collection_result_folder_dir)
collector = PeneDataCollector(p_id)
ct = 0
print('result file names', len(train_set.all_result_file_names), len(test_set.all_result_file_names))
for i, hook_name in enumerate(all_hook_name):
if args.hook_name != '' and args.hook_name != hook_name:
continue
out_pos_labels_dir = os.path.join(pos_labels_dir, '{}.txt'.format(hook_name))
out_neg_labels_dir = os.path.join(neg_labels_dir, '{}.txt'.format(hook_name))
# if os.path.exists(out_pos_labels_dir) and os.path.exists(out_neg_labels_dir):
# print('skip', hook_name)
# continue
hook_urdf = all_hook_urdf[i]
hook_bullet_id, hook_scaling = collector.init_hook(hook_urdf)
hook_world_pos_offset = get_hook_wall_offset(hook_urdf)
hook_pc_dir = get_numpy_dir_from_urdf(hook_urdf)
hook_world_pos = collector.get_hook_world_pos(hook_bullet_id, hook_world_pos_offset)
hook_pc = np.load(hook_pc_dir)
hook_tree = KDTree(hook_pc[:, :3], leafsize=1000)
num_pos_dict = {}
num_neg_dict = {}
for j, object_name in enumerate(all_object_name):
# if not 'daily_object' in object_name:
# continue
result_file_name = hook_name + '_' + object_name
if (not result_file_name in train_set.all_result_file_names) \
and (not result_file_name in test_set.all_result_file_names):
continue
object_urdf = all_object_urdf[j]
object_pc_dir = get_numpy_dir_from_urdf(object_urdf)
object_pc = np.load(object_pc_dir)
print(result_file_name)
neg_out_dir = os.path.join(neg_collection_result_folder_dir, result_file_name + '.txt')
pos_out_dir = os.path.join(pos_collection_result_folder_dir, result_file_name + '.txt')
# result_dir = os.path.join(collection_result_folder_dir, result_file_name+ '.txt')
# if not os.path.isfile(result_dir):
# continue
# result_file_poses = load_result_file(result_dir)
# if result_file_poses.shape[0] == 0:
# continue
# if os.path.isfile(out_dir):
# continue
ct += 1
object_bullet_id = collector.p.loadURDF(object_urdf, basePosition=[0, 0, 2], baseOrientation=[0, 0, 0, 1], globalScaling=1, useFixedBase=False)
object_scaling = collector.p.getCollisionShapeData(object_bullet_id, -1)[0][3][0]
pos_result_arr, neg_result_arr = collector.collect_pene_data_one_hook_object(hook_bullet_id, object_bullet_id, hook_urdf, object_urdf, hook_scaling, object_scaling, hook_world_pos,
hook_pc, object_pc, hook_tree, None)
num_pos_dict[result_file_name] = len(pos_result_arr)
num_neg_dict[result_file_name] = len(neg_result_arr)
print(len(pos_result_arr), len(neg_result_arr), result_file_name)
with open(pos_out_dir, 'w+') as f:
for result in pos_result_arr:
f.write(comma_separated(result) + '\n')
with open(neg_out_dir, 'w+') as f:
for result in neg_result_arr:
f.write(comma_separated(result) + '\n')
# print(pos_out_dir, neg_out_dir)
collector.p.removeBody(object_bullet_id)
if (ct + 1) % 30 == 0:
print('reset')
collector.p.disconnect()
p_id = bc.BulletClient(connection_mode=pybullet.DIRECT)
collector = PeneDataCollector(p_id)
hook_bullet_id, hook_scaling = collector.init_hook(hook_urdf)
out_pos_labels_dir = os.path.join(pos_labels_dir, '{}.txt'.format(hook_name))
out_neg_labels_dir = os.path.join(neg_labels_dir, '{}.txt'.format(hook_name))
collector.p.removeBody(hook_bullet_id)
dict_to_csv(out_pos_labels_dir, num_pos_dict)
dict_to_csv(out_neg_labels_dir, num_neg_dict)
# for j in range(20000):
# collector.p.stepSimulation()
# time.sleep(1./240.)
| 4,329
| 22
| 140
|
0e90de83b6b7ad18057ba7b1ccbf25ae095b9530
| 5,018
|
py
|
Python
|
src/rosdistro/release_cache.py
|
andre-rosa/rosdistro-1
|
62b79df2adc466ec0ea239e9210dcb26cac558ab
|
[
"BSD-3-Clause"
] | 742
|
2017-07-05T02:49:36.000Z
|
2022-03-30T12:55:43.000Z
|
src/rosdistro/release_cache.py
|
andre-rosa/rosdistro-1
|
62b79df2adc466ec0ea239e9210dcb26cac558ab
|
[
"BSD-3-Clause"
] | 94
|
2015-01-09T19:45:10.000Z
|
2022-03-22T18:44:49.000Z
|
src/rosdistro/release_cache.py
|
andre-rosa/rosdistro-1
|
62b79df2adc466ec0ea239e9210dcb26cac558ab
|
[
"BSD-3-Clause"
] | 425
|
2017-07-04T22:03:29.000Z
|
2022-03-29T06:59:06.000Z
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Open Source Robotics Foundation, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Open Source Robotics Foundation, Inc. nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
from .release_file import ReleaseFile
| 47.339623
| 248
| 0.702272
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Open Source Robotics Foundation, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Open Source Robotics Foundation, Inc. nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
from .release_file import ReleaseFile
class ReleaseCache(object):
_type = 'cache'
def __init__(self, name, data=None, distribution_file_data=None):
assert data or distribution_file_data
if data:
assert 'type' in data, "Expected file type is '%s'" % ReleaseCache._type
assert data['type'] == ReleaseCache._type, "Expected file type is '%s', not '%s'" % (ReleaseCache._type, data['type'])
assert 'version' in data, "Release cache file for '%s' lacks required version information" % name
self.version = int(data['version'])
assert self.version > 1, "Unable to handle '%s' format version '%d' anymore, please update your '%s' file to version '2'" % (ReleaseCache._type, self.version, ReleaseCache._type)
assert self.version == 2, "Unable to handle '%s' format version '%d', please update rosdistro (e.g. on Ubuntu/Debian use: sudo apt-get update && sudo apt-get install --only-upgrade python-rosdistro)" % (ReleaseCache._type, self.version)
assert 'name' in data, "Release cache file for '%s' lacks required name information" % name
assert data['name'] == name, "Release cache file for '%s' does not match the name '%s'" % (name, data['name'])
else:
self.version = 2
self._distribution_file_data = data['distribution_file'] if data else distribution_file_data
self.release_file = ReleaseFile(name, self._distribution_file_data)
self.package_xmls = data['release_package_xmls'] if data else {}
# for backward compatibility only
def __getattr__(self, name):
if name == 'release_package_xmls':
return self.package_xmls
raise AttributeError
def get_data(self):
data = {}
data['type'] = 'cache'
data['version'] = 2
data['name'] = self.release_file.name
data['distribution_file'] = self._distribution_file_data
data['package_xmls'] = self.package_xmls
return data
def update_distribution(self, distribution_file_data):
# remove packages which are not in the old distribution file
self._remove_obsolete_entries()
self._distribution_file_data = distribution_file_data
rel_file = ReleaseFile(self.distribution_file.name, self._distribution_file_data)
# remove all package xmls if repository information has changed
for pkg_name in sorted(rel_file.packages.keys()):
if pkg_name not in self.release_file.packages:
continue
if pkg_name in self.package_xmls and self._get_repo_info(rel_file, pkg_name) != self._get_repo_info(self.release_file, pkg_name):
del self.package_xmls[pkg_name]
self.release_file = rel_file
# remove packages which are not in the new distribution file
self._remove_obsolete_entries()
def _get_repo_info(self, dist_file, pkg_name):
pkg = dist_file.packages[pkg_name]
repo = dist_file.repositories[pkg.repository_name]
return (repo.version, repo.url)
def _remove_obsolete_entries(self):
        for pkg_name in list(self.package_xmls.keys()):  # copy the keys; entries may be deleted below
if pkg_name not in self.release_file.packages:
print('- REMOVE', pkg_name)
del self.package_xmls[pkg_name]
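# Minimal usage sketch (illustrative only; `dist_data` stands in for an already-parsed
# distribution/release file dict, which is not defined in this module):
#
#   cache = ReleaseCache('indigo', distribution_file_data=dist_data)
#   cache.package_xmls['some_pkg'] = '<package>...</package>'
#   serialized = cache.get_data()   # dict in the version-2 cache format handled above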
| 3,044
| 227
| 23
|
b7c91d97580e3065a00b75229cb7cc420751a1e5
| 756
|
py
|
Python
|
testing/noauth/test_ticker.py
|
North14/avanza
|
bc2b0054ba9e8f93ebeaad14acd19452e60f8713
|
[
"MIT"
] | 11
|
2020-03-17T08:17:12.000Z
|
2021-11-27T12:18:14.000Z
|
testing/noauth/test_ticker.py
|
North14/avanza
|
bc2b0054ba9e8f93ebeaad14acd19452e60f8713
|
[
"MIT"
] | 19
|
2020-03-12T09:44:33.000Z
|
2021-04-29T21:15:50.000Z
|
testing/noauth/test_ticker.py
|
North14/avanza
|
bc2b0054ba9e8f93ebeaad14acd19452e60f8713
|
[
"MIT"
] | null | null | null |
import avanza
| 34.363636
| 51
| 0.740741
|
import avanza
def test_ticker():
msft = avanza.Ticker(3873)
assert isinstance(msft.info, dict)
assert isinstance(msft.buy_price, float)
assert isinstance(msft.sell_price, float)
assert isinstance(msft.last_price, float)
assert isinstance(msft.symbol, str)
assert isinstance(msft.currency, str)
assert isinstance(msft.isin, str)
assert isinstance(msft.marketplace, str)
assert isinstance(msft.name, str)
assert isinstance(msft.change, float)
assert isinstance(msft.change_percent, float)
assert isinstance(msft.flag_code, str)
assert isinstance(msft.country, str)
assert isinstance(msft.id, int)
assert isinstance(msft.quote_updated, str)
assert isinstance(msft.last_price_updated, str)
| 718
| 0
| 23
|
c51857464ccdc837ef6d5ed77c494587bc7bc5b0
| 612
|
py
|
Python
|
tests/metadata_test.py
|
jeafreezy/rsgis
|
a0e4b1a2ac87a6d66a323bb86fc81722dcc948df
|
[
"Apache-2.0"
] | 4
|
2020-07-23T10:22:56.000Z
|
2021-04-08T20:55:52.000Z
|
tests/metadata_test.py
|
marx-keyz/rsgis
|
a0e4b1a2ac87a6d66a323bb86fc81722dcc948df
|
[
"Apache-2.0"
] | 1
|
2020-07-23T19:19:26.000Z
|
2020-07-23T23:03:44.000Z
|
tests/metadata_test.py
|
marx-keyz/rsgis
|
a0e4b1a2ac87a6d66a323bb86fc81722dcc948df
|
[
"Apache-2.0"
] | 2
|
2020-07-23T16:08:41.000Z
|
2020-07-24T08:24:21.000Z
|
import unittest
from rsgis import Metadata
import os
if __name__ == '__main__':
unittest.main()
| 17
| 70
| 0.49183
|
import unittest
from rsgis import Metadata
import os
class MetadataTest(unittest.TestCase):
def test_file(self):
        dir = os.getcwd() + '\\data'  # escape the backslash; '\d' is not a valid escape sequence
for _ in os.listdir(dir):
path = dir + '\\' + _
if _.endswith('txt') and _.split('.')[0].endswith('_MTL'):
try:
with open(path,'r') as file:
if file.readlines():
self.assertEqual(Metadata(path).path,path)
except Exception as err:
raise err
if __name__ == '__main__':
unittest.main()
| 438
| 17
| 50
|
5c4333ba36422bc2f175bfb930c540fae03012e8
| 162
|
py
|
Python
|
backend/schemas/schemas.py
|
vasconcel0s/todo
|
062a85c4de835e209ff8d1ed7023b1c485fd9c7c
|
[
"MIT"
] | null | null | null |
backend/schemas/schemas.py
|
vasconcel0s/todo
|
062a85c4de835e209ff8d1ed7023b1c485fd9c7c
|
[
"MIT"
] | null | null | null |
backend/schemas/schemas.py
|
vasconcel0s/todo
|
062a85c4de835e209ff8d1ed7023b1c485fd9c7c
|
[
"MIT"
] | null | null | null |
from ma import ma
from models.entities import Todo
| 18
| 42
| 0.703704
|
from ma import ma
from models.entities import Todo
class TodoSchema(ma.SQLAlchemyAutoSchema):
class Meta:
model = Todo
load_instance = True
| 0
| 87
| 23
|
2f45665c787c089aa3b99a2dafcb5fde0d81a10b
| 133
|
py
|
Python
|
main.py
|
xaviermarquez-alba/ulauncher-virtualbox
|
caddcce5a0c401278bf726c03b3446c33b594f30
|
[
"MIT"
] | 7
|
2019-09-25T23:51:24.000Z
|
2021-12-19T00:21:51.000Z
|
main.py
|
xaviermarquez-alba/ulauncher-virtualbox
|
caddcce5a0c401278bf726c03b3446c33b594f30
|
[
"MIT"
] | 3
|
2019-09-22T01:57:20.000Z
|
2020-12-25T10:47:11.000Z
|
main.py
|
xaviermarquez-alba/ulauncher-virtualbox
|
caddcce5a0c401278bf726c03b3446c33b594f30
|
[
"MIT"
] | 1
|
2020-09-22T08:38:10.000Z
|
2020-09-22T08:38:10.000Z
|
from ulauncher_virtualbox.VirtualboxExtension import VirtualboxExtension
if __name__ == '__main__':
VirtualboxExtension().run()
| 26.6
| 72
| 0.81203
|
from ulauncher_virtualbox.VirtualboxExtension import VirtualboxExtension
if __name__ == '__main__':
VirtualboxExtension().run()
| 0
| 0
| 0
|
2b48b084df38c996ed224cdc728cc277df5cc0f8
| 1,710
|
py
|
Python
|
test.py
|
lhuang001/temperature_simulation
|
ad2bb9e20b09c8997d6bcddb9fbc73f17b2186e6
|
[
"MIT"
] | null | null | null |
test.py
|
lhuang001/temperature_simulation
|
ad2bb9e20b09c8997d6bcddb9fbc73f17b2186e6
|
[
"MIT"
] | null | null | null |
test.py
|
lhuang001/temperature_simulation
|
ad2bb9e20b09c8997d6bcddb9fbc73f17b2186e6
|
[
"MIT"
] | null | null | null |
#! -*- coding:utf8 -*-
# This file used to calculate avg temperature of everyday
import sys
import numpy as np
import pandas as pd
from filter import check_threshold, check_data_integrity, check_column_name
def calculate_average(data, column_names):
"""计算温度相关列的平均值
:param data:
:param column_names:
:return:
"""
temperatures = []
grouped_data = data.groupby('day')
for name, group in grouped_data:
temperature = {}
temperature['date'] = name
for cn in column_names:
_cal_ave(temperature, group, cn)
temperatures.append(temperature.copy())
temperatures_dataframe = pd.DataFrame(temperatures)
return temperatures_dataframe
def _cal_ave(temperature, df, column_name):
""" 计算指定列的平均值
:return:
"""
# check today temperature is valid
if check_column_name(column_name) and check_threshold(df, 50, column_name) and check_data_integrity(df, None):
temperature[column_name] = df[column_name].mean()
else:
temperature[column_name] = np.NaN
if __name__ == '__main__':
input, output = sys.argv[1:]
data, column_names = read(input)
    # the date column does not take part in the averaging
column_names.remove('day')
tem_df = calculate_average(data, column_names)
tem_df.to_csv(output)
| 28.032787
| 114
| 0.690643
|
#! -*- coding:utf8 -*-
# This file used to calculate avg temperature of everyday
import sys
import numpy as np
import pandas as pd
from filter import check_threshold, check_data_integrity, check_column_name
def read(input_file):
origin_data = pd.read_csv(input_file, header=[0], sep=",")
""" get some columns and dalete rows whose value is NaN """
column_names = origin_data.columns.values.tolist()
valid_column_names = [cn for cn in column_names if check_column_name(cn)]
valid_column_names.insert(0, 'day')
data = origin_data[valid_column_names]
data = data.dropna()
return data, valid_column_names
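# Expected input layout (implied by read/calculate_average): a CSV with a 'day' column
# plus one column per temperature reading; which column names count as temperature
# columns is decided by filter.check_column_name, which is not shown here.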
def calculate_average(data, column_names):
"""计算温度相关列的平均值
:param data:
:param column_names:
:return:
"""
temperatures = []
grouped_data = data.groupby('day')
for name, group in grouped_data:
temperature = {}
temperature['date'] = name
for cn in column_names:
_cal_ave(temperature, group, cn)
temperatures.append(temperature.copy())
temperatures_dataframe = pd.DataFrame(temperatures)
return temperatures_dataframe
def _cal_ave(temperature, df, column_name):
""" 计算指定列的平均值
:return:
"""
# check today temperature is valid
if check_column_name(column_name) and check_threshold(df, 50, column_name) and check_data_integrity(df, None):
temperature[column_name] = df[column_name].mean()
else:
temperature[column_name] = np.NaN
if __name__ == '__main__':
input, output = sys.argv[1:]
data, column_names = read(input)
    # the date column does not take part in the averaging
column_names.remove('day')
tem_df = calculate_average(data, column_names)
tem_df.to_csv(output)
| 404
| 0
| 23
|
1f31928f96ab774fcfe1bc6af6461ddc6ef49f76
| 6,231
|
py
|
Python
|
prepare_data.py
|
griff4692/question_generation
|
f5c0fc56b6520bced02c376a8da275bf84311feb
|
[
"MIT"
] | null | null | null |
prepare_data.py
|
griff4692/question_generation
|
f5c0fc56b6520bced02c376a8da275bf84311feb
|
[
"MIT"
] | null | null | null |
prepare_data.py
|
griff4692/question_generation
|
f5c0fc56b6520bced02c376a8da275bf84311feb
|
[
"MIT"
] | null | null | null |
import os
import logging
from dataclasses import dataclass, field
from typing import Optional
import pandas as pd
import torch
from tqdm import tqdm
from transformers import T5Tokenizer, BartTokenizer, HfArgumentParser
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
task: str = field(
metadata={"help":
"Which task 'qa', 'qg', 'e2e_qg', 'ans_ext', 'multi'. 'multi' means 'qa', 'qg', 'ans_ext' tasks"},
)
model_type: str = field(metadata={"help": "One of 't5', 'bart'"})
dataset_path: Optional[str] = field(
default='~/Desktop/aqa/data/squad',
metadata={'help': 'data directory for train and validation csv files.'}
)
train_file_name: Optional[str] = field(
default=None,
metadata={"help": "name for cached train dataset"},
)
valid_file_name: Optional[str] = field(
default=None,
metadata={"help": "name for cached valid dataset"},
)
valid_for_qg_only: bool = field(
default=False,
metadata={"help": "For multitask dataset valid split should contain only qg task or all tasks."}
)
qg_format: Optional[str] = field(
default='highlight_qg_format',
metadata={'help': "How to format inputs for que generation, 'highlight_qg_format' or 'prepend_qg_format'"},
)
max_source_length: Optional[int] = field(
default=512,
metadata={'help': 'Max input length for the source text'},
)
max_target_length: Optional[int] = field(
default=64,
metadata={'help': 'Max input length for the target text'},
)
if __name__ == "__main__":
main()
| 34.425414
| 120
| 0.644359
|
import os
import logging
from dataclasses import dataclass, field
from typing import Optional
import pandas as pd
import torch
from tqdm import tqdm
from transformers import T5Tokenizer, BartTokenizer, HfArgumentParser
logger = logging.getLogger(__name__)
class QGDataset(torch.utils.data.Dataset):
def __init__(self, examples, dtype, model_type, max_source_length=512, max_target_length=32):
self.examples = examples[:1000]
self.dtype = dtype
self.max_source_length = max_source_length
self.max_target_length = max_target_length
self.model_type = model_type
if model_type == 't5':
self.sep_token = '<sep>'
elif model_type == 'bart':
self.sep_token = '<sep>'
else:
self.sep_token = '[SEP]'
@staticmethod
def _add_eos(example):
example['graph_seq'] += ' </s>'
example['q_toks'] += ' </s>'
return example
def preprocess(self, tokenizer):
for i, example in tqdm(enumerate(self), total=len(self)):
example = self._add_eos(example)
source_encoding = tokenizer.encode_plus(
example['graph_seq'],
max_length=self.max_source_length,
padding='max_length',
pad_to_max_length=True,
truncation=True,
return_tensors='pt'
)
target_encoding = tokenizer.encode_plus(
example['q_toks'],
max_length=self.max_target_length,
padding='max_length',
pad_to_max_length=True,
truncation=True,
return_tensors='pt'
)
encodings = {
'source_ids': source_encoding['input_ids'],
'target_ids': target_encoding['input_ids'],
'attention_mask': source_encoding['attention_mask'],
}
example.update(encodings)
self.examples[i] = example
def __getitem__(self, item):
return self.examples[item]
def __len__(self):
return len(self.examples)
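# Input records for QGDataset (as used in preprocess above and in main below) are dicts
# with at least a 'graph_seq' source string and a 'q_toks' target string; main() also
# filters on a 'q_tok_recall' score. Anything beyond that is dataset-specific.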
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
task: str = field(
metadata={"help":
"Which task 'qa', 'qg', 'e2e_qg', 'ans_ext', 'multi'. 'multi' means 'qa', 'qg', 'ans_ext' tasks"},
)
model_type: str = field(metadata={"help": "One of 't5', 'bart'"})
dataset_path: Optional[str] = field(
default='~/Desktop/aqa/data/squad',
metadata={'help': 'data directory for train and validation csv files.'}
)
train_file_name: Optional[str] = field(
default=None,
metadata={"help": "name for cached train dataset"},
)
valid_file_name: Optional[str] = field(
default=None,
metadata={"help": "name for cached valid dataset"},
)
valid_for_qg_only: bool = field(
default=False,
metadata={"help": "For multitask dataset valid split should contain only qg task or all tasks."}
)
qg_format: Optional[str] = field(
default='highlight_qg_format',
metadata={'help': "How to format inputs for que generation, 'highlight_qg_format' or 'prepend_qg_format'"},
)
max_source_length: Optional[int] = field(
default=512,
metadata={'help': 'Max input length for the source text'},
)
max_target_length: Optional[int] = field(
default=64,
metadata={'help': 'Max input length for the target text'},
)
def main():
parser = HfArgumentParser((DataTrainingArguments,))
data_args = parser.parse_args_into_dataclasses()[0]
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO
)
if data_args.model_type == 't5':
tokenizer = T5Tokenizer.from_pretrained('t5-base')
else:
        tokenizer = BartTokenizer.from_pretrained('bart-base')  # use the imported BartTokenizer for the bart model type
tokenizer.add_tokens(['<sep>', '<hl>', '<s>', '<o>', '<v>'])
train_dataset = pd.read_csv(os.path.join(data_args.dataset_path, 'dataset_train.csv'))
valid_dataset = pd.read_csv(os.path.join(data_args.dataset_path, 'dataset_validation.csv'))
train_dataset.dropna(inplace=True)
valid_dataset.dropna(inplace=True)
min_qrecall = 0.33
train_dataset = train_dataset[train_dataset['q_tok_recall'] >= min_qrecall]
valid_dataset = valid_dataset[valid_dataset['q_tok_recall'] >= min_qrecall]
train_records = train_dataset.to_dict('records')
valid_records = valid_dataset.to_dict('records')
train_dataset = QGDataset(
train_records, dtype='train', model_type=data_args.model_type, max_target_length=data_args.max_target_length,
max_source_length=data_args.max_source_length
)
valid_dataset = QGDataset(
valid_records, dtype='validation', model_type=data_args.model_type,
max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length
)
train_dataset.preprocess(tokenizer)
valid_dataset.preprocess(tokenizer)
if data_args.train_file_name is None:
train_file_name = f'train_data_{data_args.task}_{data_args.qg_format}_{data_args.model_type}.pt'
train_path = os.path.join('data', train_file_name)
valid_file_name = f'valid_data_{data_args.task}_{data_args.qg_format}_{data_args.model_type}.pt'
valid_path = os.path.join('data', valid_file_name)
else:
train_path = os.path.join('data', data_args.train_file_name)
valid_path = os.path.join('data', data_args.valid_file_name)
torch.save(train_dataset, train_path)
logger.info(f"saved train dataset at {train_path}")
torch.save(valid_dataset, valid_path)
logger.info(f"saved validation dataset at {valid_path}")
tokenizer_path = f"{data_args.model_type}_qg_tokenizer"
if not os.path.exists(tokenizer_path):
os.mkdir(tokenizer_path)
tokenizer.save_pretrained(tokenizer_path)
logger.info(f"saved tokenizer at {tokenizer_path}")
if __name__ == "__main__":
main()
| 4,230
| 173
| 46
|
f8dcda45e677e991e26e58001fe409a8ed01f4a3
| 822
|
py
|
Python
|
app/__init__.py
|
lpe234/bms
|
20a2242c9c5e52b099c436eac7addfede57c3a6e
|
[
"MIT"
] | 1
|
2019-05-01T08:11:59.000Z
|
2019-05-01T08:11:59.000Z
|
app/__init__.py
|
lpe234/bms
|
20a2242c9c5e52b099c436eac7addfede57c3a6e
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
lpe234/bms
|
20a2242c9c5e52b099c436eac7addfede57c3a6e
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
import os
from flask import Flask, redirect, url_for
from app.config import DevelopmentConfig, ProductionConfig
__author__ = 'lpe234'
# create application
bms_app = Flask(__name__)
if os.environ.get('BMS_ENV') == 'PRODUCTION':
bms_app.config.from_object(ProductionConfig)
else:
bms_app.config.from_object(DevelopmentConfig)
# db
from app.models import db
db.init_app(bms_app)
# login
from app.login_utils import login_manager
login_manager.init_app(bms_app)
# main blueprint
from app.main_views import main
bms_app.register_blueprint(main, url_prefix='/main/')
# api blueprint
from app.api import api
bms_app.register_blueprint(api, url_prefix='/api/')
@bms_app.route('/')
if __name__ == '__main__':
bms_app.run()
| 18.266667
| 58
| 0.750608
|
# -*- coding: UTF-8 -*-
import os
from flask import Flask, redirect, url_for
from app.config import DevelopmentConfig, ProductionConfig
__author__ = 'lpe234'
# create application
bms_app = Flask(__name__)
if os.environ.get('BMS_ENV') == 'PRODUCTION':
bms_app.config.from_object(ProductionConfig)
else:
bms_app.config.from_object(DevelopmentConfig)
# db
from app.models import db
db.init_app(bms_app)
# login
from app.login_utils import login_manager
login_manager.init_app(bms_app)
# main blueprint
from app.main_views import main
bms_app.register_blueprint(main, url_prefix='/main/')
# api blueprint
from app.api import api
bms_app.register_blueprint(api, url_prefix='/api/')
@bms_app.route('/')
def index():
return redirect(url_for('main.index'))
if __name__ == '__main__':
bms_app.run()
| 34
| 0
| 22
|
e8b6f68f583fbe914e7e2b21c7582a39be6f4efa
| 11,287
|
py
|
Python
|
ypp.py
|
tssga-arch/myotc
|
4fd166f4b59b856f68d531f9ef6c5cb9419a43fa
|
[
"BSD-2-Clause"
] | null | null | null |
ypp.py
|
tssga-arch/myotc
|
4fd166f4b59b856f68d531f9ef6c5cb9419a43fa
|
[
"BSD-2-Clause"
] | null | null | null |
ypp.py
|
tssga-arch/myotc
|
4fd166f4b59b856f68d531f9ef6c5cb9419a43fa
|
[
"BSD-2-Clause"
] | 1
|
2021-12-01T12:44:27.000Z
|
2021-12-01T12:44:27.000Z
|
#!/usr/bin/env python3
import base64
import yaml
import sys
import re
import os
import random
import string
import subprocess
import json
from d3des import encrypt as d3des
try:
from passlib.hash import md5_crypt, sha256_crypt, sha512_crypt
from cryptography.hazmat.primitives import serialization as crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as crypto_default_backend
except ImportError:
pass
###################################################################
#
# YAML related utilities
#
###################################################################
yaml_include_path = []
secrets_file = '_secrets_file_'
key_store = '_ssh_key_store_'
yaml_pp_vars = dict(os.environ)
valid_re = re.compile(r'^[_A-Za-z][_A-Za-z0-9]*$')
include_res = [ re.compile(r'^(\s*)#\s*include\s+') , re.compile(r'^(\s*-\s*)#\s*include\s+')]
include_type = re.compile(r'\s*--(raw|bin)\s+')
keygen_re = re.compile(r'(.*)\$KEYGEN:([A-Za-z][A-Za-z0-9]*)(:[^\$]*|)\$')
pwgen_re = re.compile(r'(.*)\$PWGEN:([A-Za-z][A-Za-z0-9]*)(:[^\$]*|)\$')
define_re = re.compile(r'^\s*#\s*define\s+([_A-Za-z][_A-Za-z0-9]*)\s*')
ifdef_re = re.compile(r'^\s*#\s*ifdef\s+([_A-Za-z][_A-Za-z0-9]*)\s*')
ifndef_re = re.compile(r'^\s*#\s*ifndef\s+([_A-Za-z][_A-Za-z0-9]*)\s*')
else_re = re.compile(r'^\s*#\s*else\s*')
endif_re = re.compile(r'^\s*#\s*endif\s*')
exec_re = re.compile(r'^(\s*)#\s*exec\s+(.*)$')
###################################################################
#
# Main command line
#
###################################################################
if __name__ == '__main__':
from argparse import ArgumentParser, Action
cli = ArgumentParser(prog='ypp',description='YAML file pre-processor')
cli.add_argument('-I','--include', help='Add Include path', action='append')
cli.add_argument('-D','--define', help='Add constant', action='append')
cli.add_argument('-y','--yaml', help='Parse YAML',action='store_true')
cli.add_argument('-p','--preproc', help='Use pre-processor when parsing yaml',action='store_true')
cli.add_argument('file', help='YAML file to parse')
args = cli.parse_args()
yparse_cmd(args)
sys.exit()
| 28.793367
| 100
| 0.576504
|
#!/usr/bin/env python3
import base64
import yaml
import sys
import re
import os
import random
import string
import subprocess
import json
from d3des import encrypt as d3des
try:
from passlib.hash import md5_crypt, sha256_crypt, sha512_crypt
from cryptography.hazmat.primitives import serialization as crypto_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend as crypto_default_backend
except ImportError:
pass
###################################################################
#
# YAML related utilities
#
###################################################################
yaml_include_path = []
secrets_file = '_secrets_file_'
key_store = '_ssh_key_store_'
yaml_pp_vars = dict(os.environ)
valid_re = re.compile(r'^[_A-Za-z][_A-Za-z0-9]*$')
def yaml_init(inc_path, predef):
if not secrets_file in yaml_pp_vars:
if os.path.isfile('_secrets.yaml'):
yaml_pp_vars[secrets_file] = '_secrets.yaml'
elif os.path.isfile('../secrets/_secrets.yaml'):
yaml_pp_vars[secrets_file] = '../secrets/_secrets.yaml'
else:
yaml_pp_vars[secrets_file] = '_secrets.yaml'
if not key_store in yaml_pp_vars:
if os.path.isdir('_keys_'):
yaml_pp_vars[key_store] = '_keys_'
elif os.path.isdir('../secrets'):
yaml_pp_vars[key_store] = '../secrets'
else:
yaml_pp_vars[key_store] = '_keys_'
# ~ print("secrets_file: "+yaml_pp_vars[secrets_file])
# ~ print("key_store: " + yaml_pp_vars[key_store])
if inc_path:
for inc in inc_path:
if os.path.isdir(inc):
yaml_include_path.append(inc)
if predef:
for kvp in predef:
if '=' in kvp:
kvp = kvp.split('=',1)
key = kvp[0]
val = kvp[1]
else:
key = kvp
val = ''
if valid_re.match(key):
yaml_pp_vars[key] = val
else:
print('{} is not a valid name'.format(key))
def yaml_findfile(fname, prev):
if fname[0] == '/':
# This is an absolute path!
return fname
if prev:
dn = os.path.dirname(prev)
if dn == '':
tname = fname
else:
tname = '{}/{}'.format(dn,fname)
if os.path.isfile(tname): return tname
for dn in yaml_include_path:
tname = '{}/{}'.format(dn,fname)
if os.path.isfile(tname): return tname
# Otherwise just hope for the best!
return fname
include_res = [ re.compile(r'^(\s*)#\s*include\s+') , re.compile(r'^(\s*-\s*)#\s*include\s+')]
include_type = re.compile(r'\s*--(raw|bin)\s+')
def yaml_inc(line):
for inc_re in include_res:
mv = inc_re.match(line)
if mv is None: continue
fname = line[mv.end():]
prefix = mv.group(1)
mv = include_type.match(fname)
if mv:
fname = fname[mv.end():]
inctype = mv.group(1)
else:
inctype = None
return { 'file': fname, 'prefix': prefix, 'type': inctype }
return None
def yaml_raw(fname, prefix = '', prev = None):
txt = ''
prefix2 = prefix.replace('-',' ')
fname = yaml_findfile(fname, prev)
with open(fname,'r') as f:
for line in f:
if line.endswith("\n"): line = line[:-1]
if line.endswith("\r"): line = line[:-1]
txt += prefix + line + "\n"
prefix = prefix2
return txt
def yaml_bin(fname, prefix = '', prev = None):
txt = ''
prefix2 = prefix.replace('-',' ')
fname = yaml_findfile(fname, prev)
with open(fname,'rb') as f:
b64 = base64.b64encode(f.read()).decode('ascii')
i = 0
while i < len(b64):
txt += prefix + b64[i:i+76] + "\n"
prefix = prefix2
i += 76
return txt
keygen_re = re.compile(r'(.*)\$KEYGEN:([A-Za-z][A-Za-z0-9]*)(:[^\$]*|)\$')
def sshkeygen(line):
mv = keygen_re.match(line)
if not mv: return line
if mv.group(1)[-1] == '$':
return line[:len(mv.group(1))-1] + line[len(mv.group(1)):]
store = mv.group(2)
mode = 'pub'
key_sz = 2048
for opt in mv.group(3).split(':'):
if not opt: continue
if opt == 'pub' or opt == 'priv':
mode = opt
continue
elif opt.isnumeric():
key_sz = int(opt)
keydir= yaml_pp_vars[key_store]
if not os.path.isdir(keydir): os.mkdir(keydir)
if os.path.isfile(keydir + "/" + store) and os.path.isfile(keydir + '/' + store + '.pub'):
with open(keydir + "/" + store,'r') as fp:
private_key = fp.read().strip()
with open(keydir + "/" + store + '.pub','r') as fp:
public_key = fp.read().strip()
else:
key = rsa.generate_private_key(
backend=crypto_default_backend(),
public_exponent=65537,
key_size=key_sz
)
private_key = key.private_bytes(
crypto_serialization.Encoding.PEM,
crypto_serialization.PrivateFormat.TraditionalOpenSSL,
crypto_serialization.NoEncryption()
).decode('ascii')
public_key = key.public_key().public_bytes(
crypto_serialization.Encoding.OpenSSH,
crypto_serialization.PublicFormat.OpenSSH
).decode('ascii')
with open(keydir + "/" + store,'w') as fp:
fp.write(private_key + "\n")
with open(keydir + "/" + store + '.pub','w') as fp:
fp.write(public_key + "\n")
if mode == 'pub':
okey = public_key
else:
okey = private_key
lines = []
for part in okey.split("\n"):
lines.append(line[:len(mv.group(1))] + part + line[len(mv.group(0)):])
return "\n".join(lines)
pwgen_re = re.compile(r'(.*)\$PWGEN:([A-Za-z][A-Za-z0-9]*)(:[^\$]*|)\$')
def pwgen(line):
secrets = None
mv = pwgen_re.match(line)
while mv:
if mv.group(1)[-1] == '$':
return line[:len(mv.group(1))-1] + line[len(mv.group(1)):]
store = mv.group(2)
pwlen = 12
encode = ''
for opt in mv.group(3).split(':'):
if not opt: continue
if opt == 'MD5' or opt == 'SHA256' or opt == 'SHA512' or opt == 'vnc':
encode = opt
elif opt.isnumeric():
pwlen = int(opt)
if secrets is None:
if os.path.isfile(yaml_pp_vars[secrets_file]):
with open(yaml_pp_vars[secrets_file],'r') as fp:
secrets = yaml.safe_load(fp)
else:
secrets = {}
if store in secrets:
passwd = secrets[store]
else:
charset = string.ascii_lowercase + string.ascii_uppercase + string.digits
passwd = ''.join(random.sample(charset, pwlen))
secrets[store] = passwd
with open(yaml_pp_vars[secrets_file],'w') as fp:
fp.write(yaml.dump(secrets))
print('Generated password for {store} as {passwd}'.format(store=store,passwd=passwd))
if encode == 'MD5':
cpassw = md5_crypt.hash(passwd)
elif encode == 'SHA256':
cpassw = sha256_crypt.hash(passwd,rounds=5000)
elif encode == 'SHA512':
cpassw = sha512_crypt.hash(passwd,rounds=5000)
elif encode == 'vnc':
cpassw = d3des(passwd)
else:
cpassw = passwd
line = line[:len(mv.group(1))] + cpassw + line[len(mv.group(0)):]
mv = pwgen_re.match(line)
return line
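# Marker handled by pwgen above: $PWGEN:store[:length][:MD5|SHA256|SHA512|vnc]$ (default
# length 12, plaintext). The generated password is cached in the secrets file under
# 'store', e.g. a line such as:
#   admin_password: $PWGEN:admin:16:SHA512$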
define_re = re.compile(r'^\s*#\s*define\s+([_A-Za-z][_A-Za-z0-9]*)\s*')
ifdef_re = re.compile(r'^\s*#\s*ifdef\s+([_A-Za-z][_A-Za-z0-9]*)\s*')
ifndef_re = re.compile(r'^\s*#\s*ifndef\s+([_A-Za-z][_A-Za-z0-9]*)\s*')
else_re = re.compile(r'^\s*#\s*else\s*')
endif_re = re.compile(r'^\s*#\s*endif\s*')
exec_re = re.compile(r'^(\s*)#\s*exec\s+(.*)$')
def yaml_pp(fname, prefix = '', prev = None):
txt = ''
prefix2 = prefix.replace('-',' ')
cond_stack = []
fname = yaml_findfile(fname, prev)
with open(fname,'r') as f:
for line in f:
if line.endswith("\n"): line = line[:-1]
if line.endswith("\r"): line = line[:-1]
if len(cond_stack):
# In Conditional
mv = else_re.match(line)
if mv:
# It is an else match...
cond_stack[0] = not cond_stack[0]
continue
mv = endif_re.match(line)
if mv:
# It is an endif match... so pop the stack!
cond_stack = cond_stack[1:]
continue
if not cond_stack[0]:
# supressing output...
mv = ifdef_re.match(line)
if mv:
# handle a nested ifdef
cond_stack.insert(0,False)
continue
mv = ifndef_re.match(line)
if mv:
# handle a nested ifndef
cond_stack.insert(0,False)
continue
continue
mv = ifdef_re.match(line)
if mv:
if mv.group(1) in yaml_pp_vars:
cond_stack.insert(0,True)
else:
cond_stack.insert(0,False)
continue
mv = ifndef_re.match(line)
if mv:
if mv.group(1) in yaml_pp_vars:
cond_stack.insert(0,False)
else:
cond_stack.insert(0,True)
continue
mv = define_re.match(line)
if mv:
yaml_pp_vars[mv.group(1)] = line[mv.end():].format(**yaml_pp_vars)
continue
mv = yaml_inc(line)
if mv:
if mv['type'] == 'raw':
txt += yaml_raw(mv['file'], prefix = prefix2+mv['prefix'], prev=fname)
elif mv['type'] == 'bin':
txt += yaml_bin(mv['file'], prefix = prefix2+mv['prefix'], prev=fname)
else:
txt += yaml_pp(mv['file'], prefix = prefix2+mv['prefix'], prev=fname)
continue
mv = exec_re.match(line)
if mv:
cwd = os.path.dirname(fname)
if cwd == '': cwd=None
rc = subprocess.run(mv.group(2),
capture_output=True,
shell=True,
text=True,
cwd=cwd)
if rc.returncode != 0:
sys.stderr.write('Command: {cmd} exited status {st}\n'.format(
cmd=mv.group(1),
st=rc.returncode))
if rc.stderr != '': sys.stderr.write(rc.stderr)
for i in rc.stdout.split('\n'):
txt += prefix + mv.group(1) + i +'\n'
continue
line = prefix + line.format(**yaml_pp_vars)
txt += sshkeygen(pwgen(line)) + "\n"
prefix = prefix2
return txt
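# Directives recognised by yaml_pp above (written in comment position, one per line):
#   # include [--raw|--bin] <file>   -- splice another file, searching yaml_include_path
#   # define NAME value              -- set a substitution variable
#   # ifdef NAME / # ifndef NAME / # else / # endif
#   # exec <command>                 -- insert the command's stdout
# Plain lines are passed through str.format() against yaml_pp_vars, so {NAME} expands.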
def yparse_cmd(args):
if args.yaml:
if args.preproc:
yaml_init(args.include, args.define)
ytxt = yaml_pp(args.file)
else:
ytxt = open(args.file, 'r')
res = yaml.safe_load(ytxt)
print(json.dumps(res))
else:
yaml_init(args.include, args.define)
txt = yaml_pp(args.file)
print(txt)
def dump(data):
return yaml.dump(data)
def process(yamlfile, includes, defines):
yaml_init(includes, defines)
return yaml.safe_load(yaml_pp(yamlfile))
def load(thing):
return yaml.safe_load(thing)
###################################################################
#
# Main command line
#
###################################################################
if __name__ == '__main__':
from argparse import ArgumentParser, Action
cli = ArgumentParser(prog='ypp',description='YAML file pre-processor')
cli.add_argument('-I','--include', help='Add Include path', action='append')
cli.add_argument('-D','--define', help='Add constant', action='append')
cli.add_argument('-y','--yaml', help='Parse YAML',action='store_true')
cli.add_argument('-p','--preproc', help='Use pre-processor when parsing yaml',action='store_true')
cli.add_argument('file', help='YAML file to parse')
args = cli.parse_args()
yparse_cmd(args)
sys.exit()
| 8,787
| 0
| 275
|
a2b1f3047d1f4d6e1a0c89cc693c623246a2513c
| 1,195
|
py
|
Python
|
migrations/versions/1b5bbc75de44_.py
|
deniskorobicyn/kozmic-ci
|
0af754b81891722824c6bea85154590f15931030
|
[
"BSD-3-Clause"
] | 1
|
2021-06-05T18:36:13.000Z
|
2021-06-05T18:36:13.000Z
|
migrations/versions/1b5bbc75de44_.py
|
deniskorobicyn/kozmic-ci
|
0af754b81891722824c6bea85154590f15931030
|
[
"BSD-3-Clause"
] | null | null | null |
migrations/versions/1b5bbc75de44_.py
|
deniskorobicyn/kozmic-ci
|
0af754b81891722824c6bea85154590f15931030
|
[
"BSD-3-Clause"
] | null | null | null |
"""empty message
Revision ID: 1b5bbc75de44
Revises: 1531599e3534
Create Date: 2014-01-02 20:52:28.389571
"""
# revision identifiers, used by Alembic.
revision = '1b5bbc75de44'
down_revision = '1531599e3534'
from alembic import op
import sqlalchemy as sa
from kozmic.models import db
| 30.641026
| 102
| 0.714644
|
"""empty message
Revision ID: 1b5bbc75de44
Revises: 1531599e3534
Create Date: 2014-01-02 20:52:28.389571
"""
# revision identifiers, used by Alembic.
revision = '1b5bbc75de44'
down_revision = '1531599e3534'
from alembic import op
import sqlalchemy as sa
from kozmic.models import db
def upgrade():
op.create_index('ix_build_created_at', 'build', ['created_at'], unique=False)
op.create_index('ix_build_number', 'build', ['number'], unique=False)
op.add_column('hook_call', sa.Column('build_id', sa.Integer(), nullable=False))
try:
from kozmic.models import Job
for job in Job.query.all():
job.hook_call.build_id = job.build_id
finally:
db.session.commit()
op.create_unique_constraint('unique_hook_call_within_build', 'hook_call', ['build_id', 'hook_id'])
op.create_index('ix_organization_gh_id', 'organization', ['gh_id'], unique=False)
def downgrade():
op.drop_index('ix_organization_gh_id', 'organization')
op.drop_constraint('unique_hook_call_within_build', 'hook_call')
op.drop_column('hook_call', 'build_id')
op.drop_index('ix_build_number', 'build')
op.drop_index('ix_build_created_at', 'build')
| 859
| 0
| 46
|
af2fab2d84d178703e2b2b640f8375eb056735fe
| 6,856
|
py
|
Python
|
iexec-apps/0.to-update/randomGenerator/signer/signer.py
|
soumyapal96/decentralized-computing
|
2776a1adb135df1f5714bd68d81ec05a8c469b75
|
[
"MIT"
] | null | null | null |
iexec-apps/0.to-update/randomGenerator/signer/signer.py
|
soumyapal96/decentralized-computing
|
2776a1adb135df1f5714bd68d81ec05a8c469b75
|
[
"MIT"
] | null | null | null |
iexec-apps/0.to-update/randomGenerator/signer/signer.py
|
soumyapal96/decentralized-computing
|
2776a1adb135df1f5714bd68d81ec05a8c469b75
|
[
"MIT"
] | null | null | null |
import os
import sys
import attrdict
import ssl
import json
import zipfile
import random
import traceback
import gnupg
import base64
from Crypto.Cipher import AES, PKCS1_OAEP
from Crypto.PublicKey import RSA
from web3.auto import w3
from eth_account.messages import defunct_hash_message
from shutil import copyfile
keccak256 = w3.soliditySha3
debug = True
if __name__ == '__main__':
sconeDir = '/scone'
iexecOutDir = '/iexec_out'
determinismFile = 'determinism.iexec'
callbackFile = 'callback.iexec'
WriteEnclaveSign(sconeDir + '/' + determinismFile)
copyfile(sconeDir + '/' + callbackFile, iexecOutDir + '/' + callbackFile)
| 30.882883
| 110
| 0.610706
|
import os
import sys
import attrdict
import ssl
import json
import zipfile
import random
import traceback
import gnupg
import base64
from Crypto.Cipher import AES, PKCS1_OAEP
from Crypto.PublicKey import RSA
from web3.auto import w3
from eth_account.messages import defunct_hash_message
from shutil import copyfile
keccak256 = w3.soliditySha3
debug = True
class DigestSigner:
def __init__(self, enclaveKey, worker, taskid, digest):
self.result = digest;
self.resultHash = keccak256([ "bytes32", "bytes32" ], [ taskid, digest ])
self.resultSalt = keccak256([ "address", "bytes32", "bytes32" ], [ worker, taskid, digest ])
hash = defunct_hash_message(keccak256([ "bytes32", "bytes32" ], [ self.resultHash, self.resultSalt ]))
self.signature = w3.eth.account.signHash(hash, private_key=enclaveKey).signature
def jsonify(self):
return json.dumps({
'result': self.result,
'resultHash': self.resultHash.hex(),
'resultSalt': self.resultSalt.hex(),
'signature': self.signature.hex(),
})
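# DigestSigner.jsonify() (above) emits a JSON string with 'result' (the digest),
# 'resultHash' = keccak256(taskid, digest), 'resultSalt' = keccak256(worker, taskid, digest)
# and 'signature', the enclave key's signature over the hash of resultHash and resultSalt.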
def GetPublicKey():
try:
key = open('/iexec_out/public.key', 'rb');
pubKeyObj = RSA.importKey(key.read())
except:
if debug:
print("Public key is not valid, couldn't import it!")
traceback.print_exc()
pubKeyObj = None
key.close()
return pubKeyObj
def WriteEncryptedKey(symmetricKey, pubKeyObj):
print("Encrypting symmetric key")
try:
encryptor = PKCS1_OAEP.new(pubKeyObj)
encrypted = encryptor.encrypt(symmetricKey)
with open('/iexec_out/encrypted_key', 'wb+') as output:
output.write(encrypted)
if debug:
with open('/iexec_out/plaintext_key', 'wb+') as output:
output.write(symmetricKey)
except:
print('Error with opening key!')
traceback.print_exc()
def WriteInitializationVector(iv):
print("Writing iv on disk")
try:
ivfile = open('/iexec_out/iv', 'wb+')
    except Exception as ex:  # bind the exception so print(ex) below works
traceback.print_exc()
print(ex)
else:
ivfile.write(iv)
finally:
ivfile.close()
def TestReadEncryptedKey():
try:
with open('/iexec_out/private.key', 'rb') as input:
binKey = input.read()
priKeyObj = RSA.importKey(binKey)
with open('/iexec_out/encrypted_key', 'rb') as encrypted:
encrypted_key = encrypted.read()
with open('/iexec_out/plaintext_key', 'rb') as original:
original_key = original.read()
except:
print('Error reading key')
traceback.print_exc()
else:
decryptor = PKCS1_OAEP.new(priKeyObj)
key = decryptor.decrypt(encrypted_key)
assert key == original_key, "Keys don't match"
return key
def TestEncryptedOutput(symmetricKey):
try:
with open('/iexec_out/result.zip.aes', 'rb') as input, open('/iexec_out/iv','rb') as ivfile:
iv = input.read(16)
ivfromfile = ivfile.read()
assert iv == ivfromfile, "Init vector don't match"
encryptedOutput = input.read()
except:
print('Error reading encrypted output')
traceback.print_exc()
else:
decryptedOutput = DecryptOutput(encryptedOutput, symmetricKey, iv)
padNb = decryptedOutput[-1:]
#test padding
assert bytearray(decryptedOutput[-padNb[0]:]) == bytearray(padNb * padNb[0]), "Padding not right!"
#test decrypted equal to original
decryptedOutput = decryptedOutput[:len(decryptedOutput) - padNb[0]]
ZipOutput()
        with open('/iexec_out/' + os.environ['taskid'] + '_result.zip', 'rb') as input:
originalZip = input.read()
assert(decryptedOutput == originalZip)
with open('/iexec_out/result.test.zip', 'wb+') as output:
output.write(decryptedOutput)
zip_ref = zipfile.ZipFile('iexec_out/result.test.zip', 'r')
zip_ref.extractall('iexec_out')
zip_ref.close()
def DecryptOutput(encryptedOutput, key, iv):
aes = AES.new(key, AES.MODE_CBC, iv)
return aes.decrypt(encryptedOutput)
def ZipOutput():
zipf = zipfile.ZipFile(zippedOutputPath, 'a', zipfile.ZIP_DEFLATED)
os.chdir(zipTargetDirectory)
for root, dirs, files in os.walk('./'):
for file in files:
if file == zipFileName:
continue
print("Writing file " + file + " to zip archive.")
zipf.write(os.path.join(root, file))
zipf.close()
def PadZippedOutput():
print("Padding zipped output")
try:
input = open(zippedOutputPath, 'ab')
zipSize = os.path.getsize(zippedOutputPath)
blockSize = 16
nb = blockSize - zipSize % blockSize
input.write(bytearray(bytes([nb]) * nb))
except Exception as ex:
traceback.print_exc()
print(ex)
def EncryptZippedOutput(pubKeyObj):
try:
input = open(zippedOutputPath, 'rb')
output = open('/iexec_out/result.zip.aes', 'wb+')
#generate initalization vector for AES and prepend it to output
iv = os.getrandom(16)
output.write(iv)
WriteInitializationVector(iv)
#generate AES key and encrypt it/write it on disk
key = os.getrandom(32)
WriteEncryptedKey(key, pubKeyObj)
aes = AES.new(key, AES.MODE_CBC, iv)
buffer_size = 8192
#chunks = iter(lambda: input.read(buffer_size), '')
result = input.read()
#for chunk in chunks:
output.write(aes.encrypt(result))
except Exception as ex:
traceback.print_exc()
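# Resulting layout of /iexec_out/result.zip.aes (per EncryptZippedOutput above): a 16-byte
# IV followed by the AES-256-CBC ciphertext of the padded zip archive; the AES key itself
# is RSA-OAEP-encrypted into /iexec_out/encrypted_key. Note that zippedOutputPath,
# zipTargetDirectory and zipFileName are referenced but never assigned in this file, so
# they are expected to be defined elsewhere.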
def WriteEnclaveSign(digestPath):
import hashlib, os
SHAhash = hashlib.sha3_256()
try:
input = open(digestPath, 'rb')
while 1:
# Read file in as little chunks
buf = input.read(4096)
if not buf : break
SHAhash.update(buf)
input.close()
digest = '0x' + SHAhash.hexdigest()
enclaveKey = os.environ['enclave_key']
taskid = os.environ['taskid']
worker = os.environ['worker']
result = DigestSigner(
enclaveKey = enclaveKey,
worker = worker,
taskid = taskid,
digest = digest,
).jsonify()
with open('/iexec_out/enclaveSig.iexec', 'w+') as outfile:
outfile.write(result)
except Exception as ex:
traceback.print_exc()
print(ex)
if __name__ == '__main__':
sconeDir = '/scone'
iexecOutDir = '/iexec_out'
determinismFile = 'determinism.iexec'
callbackFile = 'callback.iexec'
WriteEnclaveSign(sconeDir + '/' + determinismFile)
copyfile(sconeDir + '/' + callbackFile, iexecOutDir + '/' + callbackFile)
| 5,894
| -2
| 306
|
8d8e9deb6cb7fce5b91c4b151495df2684bdaa88
| 321
|
py
|
Python
|
lightning_baselines3/off_policy_models/__init__.py
|
HenryJia/lightning-baselines3
|
10d1a0eed6136978204323250e37d49915a12e14
|
[
"MIT"
] | 3
|
2021-01-18T23:27:38.000Z
|
2021-10-04T12:07:16.000Z
|
lightning_baselines3/off_policy_models/__init__.py
|
HenryJia/lightning-baselines3
|
10d1a0eed6136978204323250e37d49915a12e14
|
[
"MIT"
] | 8
|
2021-01-21T03:29:29.000Z
|
2021-07-25T18:45:39.000Z
|
lightning_baselines3/off_policy_models/__init__.py
|
HenryJia/lightning-baselines3
|
10d1a0eed6136978204323250e37d49915a12e14
|
[
"MIT"
] | null | null | null |
from lightning_baselines3.off_policy_models.off_policy_model import OffPolicyModel
from lightning_baselines3.off_policy_models.dqn import DQN
from lightning_baselines3.off_policy_models.td3 import TD3
from lightning_baselines3.off_policy_models.ddpg import DDPG
from lightning_baselines3.off_policy_models.sac import SAC
| 53.5
| 82
| 0.906542
|
from lightning_baselines3.off_policy_models.off_policy_model import OffPolicyModel
from lightning_baselines3.off_policy_models.dqn import DQN
from lightning_baselines3.off_policy_models.td3 import TD3
from lightning_baselines3.off_policy_models.ddpg import DDPG
from lightning_baselines3.off_policy_models.sac import SAC
| 0
| 0
| 0
|
fc60fe43d9b2b2ca486b989461b322f0d49ed11c
| 1,302
|
py
|
Python
|
utils/preprocess.py
|
SijRa/mri-analysis
|
a35411bda6e39eff57f715a695b7fb6a30997706
|
[
"MIT"
] | 2
|
2020-02-28T09:53:55.000Z
|
2020-11-25T23:09:19.000Z
|
utils/preprocess.py
|
SijRa/Brain-Image-Analysis-using-Deep-Learning
|
a35411bda6e39eff57f715a695b7fb6a30997706
|
[
"MIT"
] | null | null | null |
utils/preprocess.py
|
SijRa/Brain-Image-Analysis-using-Deep-Learning
|
a35411bda6e39eff57f715a695b7fb6a30997706
|
[
"MIT"
] | 1
|
2020-07-05T09:30:11.000Z
|
2020-07-05T09:30:11.000Z
|
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
default_folds = 5
default_test_size = 0.2
| 59.181818
| 168
| 0.820276
|
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
default_folds = 5
default_test_size = 0.2
def Stratified_KFolds_Generator(folds=default_folds):
return StratifiedKFold(n_splits=folds, shuffle=True)
def Train_Test_Split(features, labels, output_class_dict, test_size=default_test_size, stratify='risk'):
mri_train, mri_test, clinical_train, clinical_test, conversion_train, conversion_test, risk_train, risk_test = train_test_split(features['mri'], features['clinical'],
labels['conversion'], labels['risk'], test_size=test_size, stratify=labels[stratify], shuffle=True)
return mri_train, mri_test, clinical_train, clinical_test, conversion_train, conversion_test, risk_train, risk_test
def Train_Test_Split_Auxiliary(features, labels, output_class_dict, test_size=default_test_size, stratify='class'):
mri_train, mri_test, clinical_train, clinical_test, class_train, class_test = train_test_split(features['mri'], features['clinical'],
labels['class'], test_size=test_size, stratify=labels[stratify], shuffle=True)
return mri_train, mri_test, clinical_train, clinical_test, class_train, class_test
def One_Hot_Encode(y, num_classes):
return to_categorical(y, num_classes)
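# Minimal usage sketch (illustrative; `labels` is assumed to be a dict holding
# 'conversion'/'risk'/'class' label arrays as in the signatures above, and the class
# count below is an assumption):
#
#   kfold = Stratified_KFolds_Generator(folds=5)
#   risk_onehot = One_Hot_Encode(labels['risk'], num_classes=3)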
| 1,013
| 0
| 92
|
78ee0374e197b2f774615713d41f166b315d5555
| 266
|
py
|
Python
|
airmap/__init__.py
|
vasantharajr/AirMapSDK-Embedded
|
a1cc41bad46d9dfba98d7d01e04cb54a38987bf8
|
[
"Apache-2.0"
] | 7
|
2016-10-20T17:50:40.000Z
|
2021-11-28T00:44:39.000Z
|
airmap/__init__.py
|
vasantharajr/AirMapSDK-Embedded
|
a1cc41bad46d9dfba98d7d01e04cb54a38987bf8
|
[
"Apache-2.0"
] | 1
|
2017-01-31T19:40:35.000Z
|
2017-01-31T19:40:35.000Z
|
airmap/__init__.py
|
isabella232/AirMapSDK-Embedded
|
a1cc41bad46d9dfba98d7d01e04cb54a38987bf8
|
[
"Apache-2.0"
] | 3
|
2016-12-03T00:17:59.000Z
|
2021-03-26T12:17:58.000Z
|
"""
Airmap package init code
AirMapSDK
Created by AirMap Team on 6/28/16.
Copyright (c) 2016 AirMap, Inc. All rights reserved.
"""
import airdefs
import connect
import statusAPI
import flightAPI
import drone
import log
import telemetryAPI
import alertsAPI
| 15.647059
| 54
| 0.770677
|
"""
Airmap package init code
AirMapSDK
Created by AirMap Team on 6/28/16.
Copyright (c) 2016 AirMap, Inc. All rights reserved.
"""
import airdefs
import connect
import statusAPI
import flightAPI
import drone
import log
import telemetryAPI
import alertsAPI
| 0
| 0
| 0
|
fb79aa7e7cfa2c2a9d3842b423d77f5b9f93021d
| 737
|
py
|
Python
|
archie_cli/ping.py
|
omeraloni/archie-cli
|
33a16ed5eb6c101b51d4fcc658c9bd2e4e0039b6
|
[
"MIT"
] | null | null | null |
archie_cli/ping.py
|
omeraloni/archie-cli
|
33a16ed5eb6c101b51d4fcc658c9bd2e4e0039b6
|
[
"MIT"
] | null | null | null |
archie_cli/ping.py
|
omeraloni/archie-cli
|
33a16ed5eb6c101b51d4fcc658c9bd2e4e0039b6
|
[
"MIT"
] | null | null | null |
import subprocess
from re import search
from .methods import config_read
| 29.48
| 108
| 0.648575
|
import subprocess
from re import search
from .methods import config_read
def ping(interface, hostname='google.com', retries=1):
ping_cmd = config_read()["ping_cmd"]
command = [f"{ping_cmd}", f"-I {interface}" if interface is not None else "", f"-c {retries}", hostname]
output, err = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
output = output.decode('utf-8')
err = err.decode('utf-8').rstrip('\n')
if err:
raise Exception(err)
try:
result = search(r'ttl=([0-9]+) time=([0-9.]+)', output)
return int(result.group(1)), float(result.group(2)) # ttl, time
    except (ValueError, AttributeError):  # AttributeError covers search() finding no match
raise Exception('ping: failed to parse output')
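# Illustrative call (assumes the configured ping binary exists and the host is reachable;
# 'eth0' is a placeholder interface):
#
#   ttl, rtt_ms = ping('eth0', 'google.com', retries=1)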
| 638
| 0
| 23
|
04018adaf9a69dc5b5a6b134fc21bdd0483c08bb
| 836
|
py
|
Python
|
ABC/abc051-abc100/abc077/c.py
|
KATO-Hiro/AtCoder
|
cbbdb18e95110b604728a54aed83a6ed6b993fde
|
[
"CC0-1.0"
] | 2
|
2020-06-12T09:54:23.000Z
|
2021-05-04T01:34:07.000Z
|
ABC/abc051-abc100/abc077/c.py
|
KATO-Hiro/AtCoder
|
cbbdb18e95110b604728a54aed83a6ed6b993fde
|
[
"CC0-1.0"
] | 961
|
2020-06-23T07:26:22.000Z
|
2022-03-31T21:34:52.000Z
|
ABC/abc051-abc100/abc077/c.py
|
KATO-Hiro/AtCoder
|
cbbdb18e95110b604728a54aed83a6ed6b993fde
|
[
"CC0-1.0"
] | null | null | null |
'''input
3
1 1 1
2 2 2
3 3 3
27
6
3 14 159 2 6 53
58 9 79 323 84 6
2643 383 2 79 50 288
87
2
1 5
2 4
3 6
3
'''
# -*- coding: utf-8 -*-
# AtCoder Beginner Contest
# Problem C
if __name__ == '__main__':
n = int(input())
a = sorted(list(map(int, input().split())))
b = list(map(int, input().split()))
c = sorted(list(map(int, input().split())))
count = 0
# See:
# https://img.atcoder.jp/arc084/editorial.pdf
# https://docs.python.jp/3/library/bisect.html
# https://beta.atcoder.jp/contests/abc077/submissions/1740764
from bisect import bisect_left
from bisect import bisect_right
for number in b:
a_count = bisect_left(a, number)
c_count = n - bisect_right(c, number)
count += a_count * c_count
print(count)
| 17.787234
| 66
| 0.577751
|
'''input
3
1 1 1
2 2 2
3 3 3
27
6
3 14 159 2 6 53
58 9 79 323 84 6
2643 383 2 79 50 288
87
2
1 5
2 4
3 6
3
'''
# -*- coding: utf-8 -*-
# AtCoder Beginner Contest
# Problem C
if __name__ == '__main__':
n = int(input())
a = sorted(list(map(int, input().split())))
b = list(map(int, input().split()))
c = sorted(list(map(int, input().split())))
count = 0
# See:
# https://img.atcoder.jp/arc084/editorial.pdf
# https://docs.python.jp/3/library/bisect.html
# https://beta.atcoder.jp/contests/abc077/submissions/1740764
from bisect import bisect_left
from bisect import bisect_right
for number in b:
a_count = bisect_left(a, number)
c_count = n - bisect_right(c, number)
count += a_count * c_count
print(count)
| 0
| 0
| 0
|
a665a8536144bfc59fc5c559c4669cd381303b02
| 779
|
py
|
Python
|
Python3/93_Restore_IP_Addresses.py
|
yangjiahao106/LeetCode
|
c30ba0ef06f444951f7ab8eee495ac43613d7f4f
|
[
"RSA-MD"
] | 1
|
2018-04-28T09:07:11.000Z
|
2018-04-28T09:07:11.000Z
|
Python3/93_Restore_IP_Addresses.py
|
yangjiahao106/LeetCode
|
c30ba0ef06f444951f7ab8eee495ac43613d7f4f
|
[
"RSA-MD"
] | 1
|
2018-02-24T16:26:30.000Z
|
2018-02-24T16:26:44.000Z
|
Python3/93_Restore_IP_Addresses.py
|
yangjiahao106/LeetCode
|
c30ba0ef06f444951f7ab8eee495ac43613d7f4f
|
[
"RSA-MD"
] | null | null | null |
#! python3
# __author__ = "YangJiaHao"
# date: 2018/3/13
if __name__ == '__main__':
so = Solution()
res = so.restoreIpAddresses('010010')
print(res)
| 24.34375
| 76
| 0.462131
|
#! python3
# __author__ = "YangJiaHao"
# date: 2018/3/13
class Solution:
def restoreIpAddresses(self, s):
"""
:type s: str
:rtype: List[str]
"""
result = []
self.dfs('', 0, s, result)
return result
def dfs(self, one, count, rest, result):
if count == 4:
if not rest:
result.append(one[:-1])
return
for i in range(1,4):
if i <= len(rest):
if int(rest[:i]) <= 255:
self.dfs(one+ rest[:i] + '.',count + 1, rest[i:],result)
if rest[0] == '0': # 每个ip字段 不能以0开头。但可以为0
break
if __name__ == '__main__':
so = Solution()
res = so.restoreIpAddresses('010010')
print(res)
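A hand-checked sketch of what the backtracking solution above returns, assuming the Solution class defined above; the inputs are the sample from this file plus one common LeetCode example:
# Expected outputs were traced by hand from the DFS above; adjust if the class changes.
expected = {
    '010010': ['0.10.0.10', '0.100.1.0'],
    '25525511135': ['255.255.11.135', '255.255.111.35'],
}
for digits, ips in expected.items():
    assert Solution().restoreIpAddresses(digits) == ips, digits
print('all IP restoration checks passed')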
| 416
| 206
| 22
|
760a1204242fcbe8695d9c35138a68c8b5f54a3e
| 2,735
|
py
|
Python
|
relatives/tests/migrations/0003_auto_20210927_1618.py
|
treyhunner/django-relatives
|
a578ab135f865df2835957cedfd00476c4b65e18
|
[
"MIT"
] | 10
|
2015-08-14T00:22:52.000Z
|
2021-09-16T08:15:14.000Z
|
relatives/tests/migrations/0003_auto_20210927_1618.py
|
treyhunner/django-relatives
|
a578ab135f865df2835957cedfd00476c4b65e18
|
[
"MIT"
] | 12
|
2015-03-09T20:17:16.000Z
|
2021-09-30T18:46:11.000Z
|
relatives/tests/migrations/0003_auto_20210927_1618.py
|
treyhunner/django-relatives
|
a578ab135f865df2835957cedfd00476c4b65e18
|
[
"MIT"
] | 3
|
2016-01-05T15:20:10.000Z
|
2018-08-03T10:51:23.000Z
|
# Generated by Django 3.2.7 on 2021-09-27 16:18
from django.db import migrations, models
| 36.959459
| 111
| 0.593419
|
# Generated by Django 3.2.7 on 2021-09-27 16:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tests', '0002_shape'),
]
operations = [
migrations.AlterField(
model_name='actor',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='book',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='image',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='journal',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='movie',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='notinadmin',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='pet',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='pirate',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='sailor',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='shape',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='ship',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='something',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
| 0
| 2,621
| 23
|
854f35d055cfcfb5230df76afe5477aabcc5bb87
| 167
|
py
|
Python
|
web-scraping/crawl.py
|
stivenramireza/nutibara-web-scraping
|
48be40b735f011ac93901a2ca97fcacc6d2d5ca6
|
[
"MIT"
] | 1
|
2019-10-02T14:29:48.000Z
|
2019-10-02T14:29:48.000Z
|
web-scraping/crawl.py
|
stivenramireza/nutibara-web-scraping
|
48be40b735f011ac93901a2ca97fcacc6d2d5ca6
|
[
"MIT"
] | 1
|
2019-10-02T14:42:31.000Z
|
2019-10-02T14:42:31.000Z
|
web-scraping/crawl.py
|
stivenramireza/anutibara-web-scraping
|
48be40b735f011ac93901a2ca97fcacc6d2d5ca6
|
[
"MIT"
] | null | null | null |
import requests
from bs4 import BeautifulSoup
| 23.857143
| 53
| 0.736527
|
import requests
from bs4 import BeautifulSoup
def scrape_html(url):
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
return soup
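A hedged usage sketch for scrape_html() above; the URL is a placeholder and network access is assumed:
# Hypothetical call site for scrape_html(); the URL below is a placeholder.
if __name__ == '__main__':
    soup = scrape_html('https://example.com')
    print(soup.title.string if soup.title else 'no <title> found')
    for link in soup.find_all('a', href=True):
        print(link['href'])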
| 99
| 0
| 23
|
1b801a12009dece66d045f5a3ba8d8132bf9ef27
| 809
|
py
|
Python
|
adminmgr/media/code/python/red3/red3.py
|
IamMayankThakur/test-bigdata
|
cef633eb394419b955bdce479699d0115d8f99c3
|
[
"Apache-2.0"
] | 9
|
2019-11-08T02:05:27.000Z
|
2021-12-13T12:06:35.000Z
|
adminmgr/media/code/python/red3/red3.py
|
IamMayankThakur/test-bigdata
|
cef633eb394419b955bdce479699d0115d8f99c3
|
[
"Apache-2.0"
] | 6
|
2019-11-27T03:23:16.000Z
|
2021-06-10T19:15:13.000Z
|
adminmgr/media/code/python/red1/red3.py
|
IamMayankThakur/test-bigdata
|
cef633eb394419b955bdce479699d0115d8f99c3
|
[
"Apache-2.0"
] | 4
|
2019-11-26T17:04:27.000Z
|
2021-12-13T11:57:03.000Z
|
#!/usr/bin/python3
import sys
diction={}
for line in sys.stdin:
line=line.strip()
line_val=line.split(',')
bowl,bat,runs,balls=line_val
runs=int(runs)
	# print(runs)  # debug-only; emitting it here adds stray lines to the reducer's stdout
balls=int(balls)
key=(bowl,bat)
if key in diction:
diction[key][0].append(runs)
diction[key][1].append(balls)
else:
diction[key]=[[],[]]
diction[key][0].append(runs)
diction[key][1].append(balls)
for key in diction.keys():
diction[key][0]=sum(diction[key][0])
diction[key][1]=sum(diction[key][1])
s=sorted(diction,key=func3)
s=sorted(s,key=func2)
s=sorted(s,key=func1,reverse=True)
for k in s:
if diction[k][1]>5:
print('%s,%s,%d,%d' % (k[0],k[1],diction[k][0],diction[k][1]))
| 18.813953
| 67
| 0.625464
|
#!/usr/bin/python3
import sys
diction={}
def func1(x):
return diction[x][0]
def func2(x):
return diction[x][1]
def func3(x):
return x[0]
for line in sys.stdin:
line=line.strip()
line_val=line.split(',')
bowl,bat,runs,balls=line_val
runs=int(runs)
	# print(runs)  # debug-only; emitting it here adds stray lines to the reducer's stdout
balls=int(balls)
key=(bowl,bat)
if key in diction:
diction[key][0].append(runs)
diction[key][1].append(balls)
else:
diction[key]=[[],[]]
diction[key][0].append(runs)
diction[key][1].append(balls)
for key in diction.keys():
diction[key][0]=sum(diction[key][0])
diction[key][1]=sum(diction[key][1])
s=sorted(diction,key=func3)
s=sorted(s,key=func2)
s=sorted(s,key=func1,reverse=True)
for k in s:
if diction[k][1]>5:
print('%s,%s,%d,%d' % (k[0],k[1],diction[k][0],diction[k][1]))
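The three chained stable sorts above order the keys by total runs descending, then total balls ascending, then bowler name ascending. Because Python's sort is stable, this is equivalent to a single composite key; a self-contained sketch with illustrative data:
# Equivalent single-key ordering for the aggregated {(bowler, batsman): [runs, balls]} dict.
sample = {('anderson', 'kohli'): [30, 20], ('broad', 'kohli'): [30, 12], ('ashwin', 'root'): [45, 60]}
ordered = sorted(sample, key=lambda k: (-sample[k][0], sample[k][1], k[0]))
for bowler, batsman in ordered:
    runs, balls = sample[(bowler, batsman)]
    print('%s,%s,%d,%d' % (bowler, batsman, runs, balls))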
| 36
| 0
| 75
|
a237da63c14ff5f89757dcbb30f63d405f1fbf03
| 22,521
|
py
|
Python
|
meiduo_mall/meiduo_mall/apps/users/views.py
|
dongwenxi/meiduo
|
144d6bd7a33e479e02e38071a666b0d652f3f24c
|
[
"MIT"
] | 3
|
2019-06-20T11:34:52.000Z
|
2019-08-20T06:25:51.000Z
|
meiduo_mall/meiduo_mall/apps/users/views.py
|
dongwenxi/meiduo
|
144d6bd7a33e479e02e38071a666b0d652f3f24c
|
[
"MIT"
] | null | null | null |
meiduo_mall/meiduo_mall/apps/users/views.py
|
dongwenxi/meiduo
|
144d6bd7a33e479e02e38071a666b0d652f3f24c
|
[
"MIT"
] | 1
|
2019-12-26T08:45:05.000Z
|
2019-12-26T08:45:05.000Z
|
from django.shortcuts import render, redirect, reverse
from django.template import RequestContext
from django.views import View
from django import http
from django.contrib.auth import login, logout, mixins
from django.db import DatabaseError
from django_redis import get_redis_connection
from django.contrib.auth import authenticate
import json, re
from django.conf import settings
from django.core.paginator import Paginator
import logging
from random import randint
from itsdangerous import TimedJSONWebSignatureSerializer as TOKEN
from .models import User, Address
from meiduo_mall.utils.views import LoginRequiredView
from .utils import check_token_to_user, generate_verify_email_url
from celery_tasks.email.tasks import send_verify_email
from celery_tasks.sms.tasks import send_sms_code
from meiduo_mall.utils.response_code import RETCODE
from goods.models import SKU
from carts.utils import merge_cart_cookie_to_redis
from orders.models import OrderInfo
logger = logging.getLogger('django') # 创建日志输出器
# Create your views here.
class UsernameCountView(View):
'''判断用户名是否已经注册'''
class MobileCountView(View):
'''判断手机号是否重复'''
class LogoutView(View):
'''退出登录'''
class UserInfoView(mixins.LoginRequiredMixin, View):
'''用户中心界面'''
class EmailView(mixins.LoginRequiredMixin, View):
'''添加用户邮箱'''
class VerifyEmailView(View):
'''激活邮箱'''
class AddressView(mixins.LoginRequiredMixin, View):
'''用户收获地址'''
def get(self, request):
'''显示用户收货地址界面'''
user = request.user
# 获取当前用户所有的收货地址
address_qs = Address.objects.filter(is_deleted=False, user=user)
address_list = []
for address in address_qs:
address_dict = {
'id': address.id,
'title': address.title,
'receiver': address.receiver,
'province_id': address.province_id,
'province': address.province.name,
'city_id': address.city_id,
'city': address.city.name,
'district_id': address.district_id,
'district': address.district.name,
'place': address.place,
'mobile': address.mobile,
'tel': address.tel,
'email': address.email,
}
address_list.append(address_dict)
context = {
'addresses': address_list,
'default_address_id': user.default_address_id
}
return render(request, 'user_center_site.html', context)
class CreateAddressView(LoginRequiredView):
'''新增收货地址'''
class UpdateDestroyAddressView(LoginRequiredView):
"""修改和删除"""
def put(self, request, address_id):
"""修改地址逻辑"""
# 查询要修改的地址对象
try:
address = Address.objects.get(id=address_id)
except Address.DoesNotExist:
return http.HttpResponseForbidden('要修改的地址不存在')
# 接收
json_dict = json.loads(request.body.decode())
title = json_dict.get('title')
receiver = json_dict.get('receiver')
province_id = json_dict.get('province_id')
city_id = json_dict.get('city_id')
district_id = json_dict.get('district_id')
place = json_dict.get('place')
mobile = json_dict.get('mobile')
tel = json_dict.get('tel')
email = json_dict.get('email')
# 校验
if all([title, receiver, province_id, city_id, district_id, place, mobile]) is False:
return http.HttpResponseForbidden('缺少必传参数')
if not re.match(r'^1[3-9]\d{9}$', mobile):
return http.HttpResponseForbidden('参数mobile有误')
if tel:
if not re.match(r'^(0[0-9]{2,3}-)?([2-9][0-9]{6,7})+(-[0-9]{1,4})?$', tel):
return http.HttpResponseForbidden('参数tel有误')
if email:
if not re.match(r'^[a-z0-9][\w\.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$', email):
return http.HttpResponseForbidden('参数email有误')
# 修改
Address.objects.filter(id=address_id).update(
title=title,
receiver=receiver,
province_id=province_id,
city_id=city_id,
district_id=district_id,
place=place,
mobile=mobile,
tel=tel,
email=email
)
address = Address.objects.get(id=address_id) # 要重新查询一次新数据
# 把新增的地址数据响应回去
address_dict = {
'id': address.id,
'title': address.title,
'receiver': address.receiver,
'province_id': address.province_id,
'province': address.province.name,
'city_id': address.city_id,
'city': address.city.name,
'district_id': address.district_id,
'district': address.district.name,
'place': address.place,
'mobile': address.mobile,
'tel': address.tel,
'email': address.email,
}
return http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK', 'address': address_dict})
# 响应
def delete(self, request, address_id):
"""对收货地址逻辑删除"""
try:
address = Address.objects.get(id=address_id)
except Address.DoesNotExist:
return http.HttpResponseForbidden('要删除的地址不存在')
address.is_deleted = True
# address.delete()
address.save()
return http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK'})
class DefaultAddressView(LoginRequiredView):
"""设置默认地址"""
def put(self, request, address_id):
"""实现默认地址"""
try:
address = Address.objects.get(id=address_id)
except Address.DoesNotExist:
return http.HttpResponseForbidden('要修改的地址不存在')
user = request.user
user.default_address = address
user.save()
return http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK'})
class UpdateTitleAddressView(LoginRequiredView):
"""修改用户收货地址标题"""
class ChangePasswordView(LoginRequiredView):
"""修改密码"""
class UserBrowseHistory(LoginRequiredView):
'''记录商品浏览记录'''
class FindPasswordView(View):
'''找回密码'''
def get(self, request):
'''渲染找回密码界面'''
return render(request, 'find_password.html')
class UsernameExistView(View):
'''验证用户名是否存在'''
class GenerateSmsCodeView(View):
'''发送短信验证码'''
class SMSVerifyView(View):
'''验证短信验证码'''
class InputPasswordView(View):
'''覆盖原密码'''
| 32.218884
| 123
| 0.592158
|
from django.shortcuts import render, redirect, reverse
from django.template import RequestContext
from django.views import View
from django import http
from django.contrib.auth import login, logout, mixins
from django.db import DatabaseError
from django_redis import get_redis_connection
from django.contrib.auth import authenticate
import json, re
from django.conf import settings
from django.core.paginator import Paginator
import logging
from random import randint
from itsdangerous import TimedJSONWebSignatureSerializer as TOKEN
from .models import User, Address
from meiduo_mall.utils.views import LoginRequiredView
from .utils import check_token_to_user, generate_verify_email_url
from celery_tasks.email.tasks import send_verify_email
from celery_tasks.sms.tasks import send_sms_code
from meiduo_mall.utils.response_code import RETCODE
from goods.models import SKU
from carts.utils import merge_cart_cookie_to_redis
from orders.models import OrderInfo
logger = logging.getLogger('django') # 创建日志输出器
# Create your views here.
class RegisterView(View):
# 注册
def get(self, request):
"""注册界面"""
return render(request, 'register.html')
def post(self, request):
"""注册业务"""
# 接收参数
username = request.POST.get('username')
password = request.POST.get('password')
password2 = request.POST.get('password2')
mobile = request.POST.get('mobile')
allow = request.POST.get('allow')
sms_code = request.POST.get('sms_code')
# 校验
# 校验传输的数据是否有为空
if not all([username, password, password2, sms_code, mobile, allow]):
return http.HttpResponseForbidden('缺少必传参数')
# 校验前端传入数据是否符合要求
if not re.match(r'^[a-zA-Z0-9_-]{5,20}$', username):
return http.HttpResponseForbidden('请输入5-20位用户名')
if not re.match(r'^[0-9A-Za-z]{8,20}$', password):
return http.HttpResponseForbidden('请输入8-20位密码')
if password2 != password:
return http.HttpResponseForbidden('两次输入的密码不一致')
if not re.match(r'^1[3-9]\d{9}$', mobile):
return http.HttpResponseForbidden('请正确输入手机号')
# 短信校验
# 创建redis连接,获取redis中的随机验证码/一定要解码
redis_conn = get_redis_connection('verify_code')
sms_code_server = redis_conn.get('sms_%s' % mobile).decode()
# 校验
if sms_code_server is None:
return http.HttpResponseForbidden('短信验证码已失效')
if sms_code_server != sms_code:
return http.HttpResponseForbidden('输入的短信验证码有误')
# 创建一个user
try:
user = User.objects.create_user(
username=username,
password=password, # 密码存储时需要加密
mobile=mobile,
)
except DatabaseError as e:
logger.error(e)
return render(request, 'register.html', {'register_errmsg': '用户注册失败'})
# 状态保持
login(request, user) # 存储用户的id到session中记录它的登录状态
response = redirect('/') # 创建好响应对象
response.set_cookie('username', user.username, max_age=settings.SESSION_COOKIE_AGE)
# 响应结果重定向到首页
return response
class UsernameCountView(View):
'''判断用户名是否已经注册'''
def get(self, request, username):
count = User.objects.filter(username=username).count()
return http.JsonResponse({'count': count, 'code': RETCODE.OK, 'errmsg': 'OK'})
class MobileCountView(View):
'''判断手机号是否重复'''
def get(self, request, mobile):
count = User.objects.filter(mobile=mobile).count()
return http.JsonResponse({'count': count, 'code': RETCODE.OK, 'errmsg': 'OK'})
class LoginView(View):
def get(self, request):
# 登录界面
return render(request, 'login.html')
def post(self, request):
# 获取表单账号,密码
username = request.POST.get('username')
password = request.POST.get('password')
remembered = request.POST.get('remembered')
# 校验
# 根据表单提交用户名获取数据库中本条user信息,基本逻辑代码
# user = User.objects.get(username= username)
# user.check_password(password)
# 实现手机号或其他多账号登录
# 通过自定义用户认证后端实现
# django中auth自带认证方法
user = authenticate(username=username, password=password)
if user is None:
return render(request, 'login.html', {'account_errmsg': '用户名或密码错误'})
# 根据是否勾选记住密码保持状态
login(request, user)
if remembered != 'on':
request.session.set_expiry(0)
else:
request.session.set_expiry(None)
# 在首页显示用户名
response = redirect(request.GET.get('next', '/')) # 创建好响应对象
response.set_cookie('username', user.username, max_age=settings.SESSION_COOKIE_AGE if remembered == 'on' else None)
# 登录成功那一刻合并购物车
merge_cart_cookie_to_redis(request, user, response)
# 响应结果重定向到首页
return response
class LogoutView(View):
'''退出登录'''
def get(self, request):
# 清除session中的状态保持数据
logout(request)
# 清除cookie中的username
response = redirect(reverse('users:login'))
response.delete_cookie('username')
return response
class UserInfoView(mixins.LoginRequiredMixin, View):
'''用户中心界面'''
def get(self, request):
return render(request, 'user_center_info.html')
class EmailView(mixins.LoginRequiredMixin, View):
'''添加用户邮箱'''
def put(self, request):
# 接收请求体email数据
json_dict = json.loads(request.body.decode())
email =json_dict.get('email')
# 校验
        if not email:
return http.HttpResponseForbidden('清输入邮箱')
if not re.match(r'^[a-z0-9][\w\.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$', email):
return http.HttpResponseForbidden('邮箱格式错误')
# 获取user
user = request.user
# 设置user.email字段
user.email = email
# 用save保存
user.save()
# 发送验证邮件,用celery
verify_url = generate_verify_email_url(user) # 生成激活链接
send_verify_email.delay(email,verify_url)
# 响应
return http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK'})
class VerifyEmailView(View):
'''激活邮箱'''
def get(self, request):
# 获取token
token = request.GET.get('token')
# 解密并获取user
user = check_token_to_user(token)
if user is None:
return http.HttpResponseForbidden('token无效')
# 修改当前user.email_active
user.email_active = True
user.save()
# 响应
return redirect('/info/')
class AddressView(mixins.LoginRequiredMixin, View):
'''用户收获地址'''
def get(self, request):
'''显示用户收货地址界面'''
user = request.user
# 获取当前用户所有的收货地址
address_qs = Address.objects.filter(is_deleted=False, user=user)
address_list = []
for address in address_qs:
address_dict = {
'id': address.id,
'title': address.title,
'receiver': address.receiver,
'province_id': address.province_id,
'province': address.province.name,
'city_id': address.city_id,
'city': address.city.name,
'district_id': address.district_id,
'district': address.district.name,
'place': address.place,
'mobile': address.mobile,
'tel': address.tel,
'email': address.email,
}
address_list.append(address_dict)
context = {
'addresses': address_list,
'default_address_id': user.default_address_id
}
return render(request, 'user_center_site.html', context)
class CreateAddressView(LoginRequiredView):
'''新增收货地址'''
def post(self, request):
user = request.user
# 判断用户收货地址数据,大于20个提前响应
count = Address.objects.filter(user=user, is_deleted=False).count()
if count > 20:
return http.HttpResponseForbidden('用户收货地址上限')
# 接收请求数据
json_dict = json.loads(request.body.decode())
title = json_dict.get('title')
receiver = json_dict.get('receiver')
province_id = json_dict.get('province_id')
city_id = json_dict.get('city_id')
district_id = json_dict.get('district_id')
place = json_dict.get('place')
mobile = json_dict.get('mobile')
tel = json_dict.get('tel')
email = json_dict.get('email')
# 校验
if all([title, receiver, province_id, city_id, district_id, place, mobile]) is False:
return http.HttpResponseForbidden('缺少必传参数')
if not re.match(r'^1[3-9]\d{9}$', mobile):
return http.HttpResponseForbidden('参数mobile有误')
if tel:
if not re.match(r'^(0[0-9]{2,3}-)?([2-9][0-9]{6,7})+(-[0-9]{1,4})?$', tel):
return http.HttpResponseForbidden('参数tel有误')
if email:
if not re.match(r'^[a-z0-9][\w\.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$', email):
return http.HttpResponseForbidden('参数email有误')
# 新增
try:
address = Address.objects.create(
user=user,
title=title,
receiver=receiver,
province_id=province_id,
city_id=city_id,
district_id=district_id,
place=place,
mobile=mobile,
tel=tel,
email=email
)
if user.default_address is None:
user.default_address = address
user.save()
except Exception:
return http.HttpResponseForbidden('保存地址失败')
# 将新增的地址响应到首页
address_dict = {
'id': address.id,
'title': address.title,
'receiver': address.receiver,
'province_id': address.province_id,
'province': address.province.name,
'city_id': address.city_id,
'city': address.city.name,
'district_id': address.district_id,
'district': address.district.name,
'place': address.place,
'mobile': address.mobile,
'tel': address.tel,
'email': address.email,
}
return http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK', 'address': address_dict})
class UpdateDestroyAddressView(LoginRequiredView):
"""修改和删除"""
def put(self, request, address_id):
"""修改地址逻辑"""
# 查询要修改的地址对象
try:
address = Address.objects.get(id=address_id)
except Address.DoesNotExist:
return http.HttpResponseForbidden('要修改的地址不存在')
# 接收
json_dict = json.loads(request.body.decode())
title = json_dict.get('title')
receiver = json_dict.get('receiver')
province_id = json_dict.get('province_id')
city_id = json_dict.get('city_id')
district_id = json_dict.get('district_id')
place = json_dict.get('place')
mobile = json_dict.get('mobile')
tel = json_dict.get('tel')
email = json_dict.get('email')
# 校验
if all([title, receiver, province_id, city_id, district_id, place, mobile]) is False:
return http.HttpResponseForbidden('缺少必传参数')
if not re.match(r'^1[3-9]\d{9}$', mobile):
return http.HttpResponseForbidden('参数mobile有误')
if tel:
if not re.match(r'^(0[0-9]{2,3}-)?([2-9][0-9]{6,7})+(-[0-9]{1,4})?$', tel):
return http.HttpResponseForbidden('参数tel有误')
if email:
if not re.match(r'^[a-z0-9][\w\.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$', email):
return http.HttpResponseForbidden('参数email有误')
# 修改
Address.objects.filter(id=address_id).update(
title=title,
receiver=receiver,
province_id=province_id,
city_id=city_id,
district_id=district_id,
place=place,
mobile=mobile,
tel=tel,
email=email
)
address = Address.objects.get(id=address_id) # 要重新查询一次新数据
# 把新增的地址数据响应回去
address_dict = {
'id': address.id,
'title': address.title,
'receiver': address.receiver,
'province_id': address.province_id,
'province': address.province.name,
'city_id': address.city_id,
'city': address.city.name,
'district_id': address.district_id,
'district': address.district.name,
'place': address.place,
'mobile': address.mobile,
'tel': address.tel,
'email': address.email,
}
return http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK', 'address': address_dict})
# 响应
def delete(self, request, address_id):
"""对收货地址逻辑删除"""
try:
address = Address.objects.get(id=address_id)
except Address.DoesNotExist:
return http.HttpResponseForbidden('要删除的地址不存在')
address.is_deleted = True
# address.delete()
address.save()
return http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK'})
class DefaultAddressView(LoginRequiredView):
"""设置默认地址"""
def put(self, request, address_id):
"""实现默认地址"""
try:
address = Address.objects.get(id=address_id)
except Address.DoesNotExist:
return http.HttpResponseForbidden('要修改的地址不存在')
user = request.user
user.default_address = address
user.save()
return http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK'})
class UpdateTitleAddressView(LoginRequiredView):
"""修改用户收货地址标题"""
def put(self, request, address_id):
try:
address = Address.objects.get(id=address_id)
except Address.DoesNotExist:
return http.HttpResponseForbidden('要修改的地址不存在')
json_dict = json.loads(request.body.decode())
title = json_dict.get('title')
address.title = title
address.save()
return http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK'})
class ChangePasswordView(LoginRequiredView):
"""修改密码"""
def get(self, request):
return render(request, 'user_center_pass.html')
def post(self, request):
# 获取表单数据
old_pwd = request.POST.get('old_pwd')
new_pwd = request.POST.get('new_pwd')
new_cpwd = request.POST.get('new_cpwd')
# 获取用户旧密码
user = request.user
# 校验
if not all([old_pwd, new_cpwd, new_pwd]):
return http.HttpResponseForbidden('缺少必传参数')
if not user.check_password(old_pwd):
return http.HttpResponseForbidden('密码输入有误')
if new_pwd != new_cpwd:
return http.HttpResponseForbidden('两次密码输入不一致')
# 保存新密码
user.set_password(new_pwd)
user.save()
logout(request)
response = redirect('/login/')
response.delete_cookie('username')
return response
class UserBrowseHistory(LoginRequiredView):
'''记录商品浏览记录'''
def post(self, request):
# 获取请求体中的sku_id
json_dict = json.loads(request.body.decode())
sku_id = json_dict.get('sku_id')
user = request.user
# 校验sku_id
try:
sku = SKU.objects.get(id = sku_id)
except SKU.DoesNotExist:
return http.HttpResponseForbidden('商品不存在')
# 创建redis连接对象,存储数据
redis_conn = get_redis_connection('history')
pl = redis_conn.pipeline()
key = 'history_%s' % user.id
# 去重
pl.lrem(key, 0, sku_id)
# 存储到开头
pl.lpush(key, sku_id)
# 截取前五个
pl.ltrim(key, 0, 4)
pl.execute()
# 响应
return http.JsonResponse({'code' : RETCODE.OK, 'errmsg': 'OK'})
def get(self, request):
# 创建redis连接
redis_conn = get_redis_connection('history')
# 截取数据库中的数据
sku_list = redis_conn.lrange('history_%s' % request.user.id, 0, -1)
skus =[]
# 遍历sku_list
for sku_id in sku_list:
sku = SKU.objects.get(id=sku_id)
sku_dict = {
'id': sku.id,
'name': sku.name,
'default_image_url': sku.default_image.url,
'price': sku.price,
}
skus.append(sku_dict)
return http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK', 'skus': skus})
class UserOrderInfoView(LoginRequiredView):
def get(self, request, page_num):
user = request.user
# 查询当前登录用户的所有订单
order_qs = OrderInfo.objects.filter(user=user).order_by('-create_times')
for order_model in order_qs:
# 给每个订单多定义两个属性-订单支付方式中文名-订单状态中文名
order_model.pay_method_name = OrderInfo.PAY_METHOD_CHOICES[order_model.pay_method - 1][1]
order_model.status_name = OrderInfo.ORDER_STATUS_CHOICES[order_model.status - 1][1]
# 再给订单模型对象定义sku_list属性,用它来包装订单中的所有商品
order_model.sku_list = []
# 获取订单中的所有商品
order_goods_qs = order_model.skus.all()
# 遍历订单中所有商品查询集
for good_model in order_goods_qs:
sku = good_model.sku
sku.count = good_model.count
sku.amount = sku.price * sku.count
# 把sku添加到订单sku_list列表中
order_model.sku_list.append(sku)
# 创建分页器
paginator = Paginator(order_qs, 2)
# 获取指定页的所有数据
page_orders = paginator.page(page_num)
# 获取总页数
total_page = paginator.num_pages
context = {
'page_orders': page_orders,
'page_num': page_num,
'total': total_page
}
return render(request, 'user_center_order.html', context)
class FindPasswordView(View):
'''找回密码'''
def get(self, request):
'''渲染找回密码界面'''
return render(request, 'find_password.html')
class UsernameExistView(View):
'''验证用户名是否存在'''
def get(self, request, username):
# 获取参数
image_code_cli = request.GET.get('text')
uuid = request.GET.get('image_code_id')
# 校验
# 校验用户名是否已存在
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
return http.JsonResponse({'code': RETCODE.USERERR, 'errmsg': '用户名不存在'}, status=404)
# 创建redis连接对象
redis_conn = get_redis_connection('verify_code')
# 提取图像验证码
img_code = redis_conn.get('img_%s' % uuid)
# 提取完毕之后删除图形验证码,防止恶意刷短信
redis_conn.delete('img_%s' % uuid)
# get数据如果为空,会报错,所以要先验证图形验证码是否失效
if img_code is None:
return http.JsonResponse({'code': RETCODE.NECESSARYPARAMERR, 'errmsg': '图形验证码失效'})
# 对比图形验证码
# 注意:从服务器提取的图形验证码一定要解码
if image_code_cli.lower() != img_code.decode().lower():
return http.JsonResponse({'code': RETCODE.IMAGECODEERR, 'errmsg': '图形验证码有误'}, status=400)
# 根据用户名获取手机号
mobile = user.mobile
# 创建token对象
token = TOKEN(settings.SECRET_KEY, 300)
access_token = token.dumps({'mobile': mobile})
access_token = access_token.decode()
# 存储access_token进redis
redis_conn.setex('token_%s' % user.id, 300, access_token)
return http.JsonResponse({'code': RETCODE.OK, 'errmsg': 'OK', 'mobile': mobile, 'access_token': access_token})
class GenerateSmsCodeView(View):
'''发送短信验证码'''
def get(self, request):
# 解析token,获取mobile
access_token = request.GET.get('access_token')
# 创建token对象
token = TOKEN(settings.SECRET_KEY, 300)
mobile = token.loads(access_token)['mobile']
# 生成短信验证码
sms_code = '%06d' % randint(0, 999999)
logger.info(sms_code)
# 创建redis管道对象来用于保存数据,能提高代码运行效率
redis_conn = get_redis_connection('verify_code')
pl = redis_conn.pipeline()
# 保存短信验证码
pl.setex('sms_%s' % mobile, 60 * 3, sms_code)
# 手机号发过短信后在redis中存储一个标记
# redis_conn.setex('send_flag_%s' % mobile, 60, 1)
pl.setex('send_flag_%s' % mobile, 60, 1)
# 执行管道
pl.execute()
# 发送短信验证码 (后面参数信息为 手机号,[验证码, 有效时间单位分钟], 短信模板ID)
# CCP().send_template_sms(mobile, [sms_code, constants.SMS_CODE_EXPIRE // 60], constants.SMS_TEMPLATE_ID)
# 将需要执行的任务列表存储在broker
send_sms_code.delay(mobile, sms_code)
return http.JsonResponse({'code': RETCODE.OK, 'errmsg': '短信发送成功'})
class SMSVerifyView(View):
'''验证短信验证码'''
def get(self, request, username):
# 获取参数信息
sms_code = request.GET.get('sms_code')
user = User.objects.get(username=username)
mobile = user.mobile
# 短信校验
# 创建redis连接,获取redis中的随机验证码/一定要解码
redis_conn = get_redis_connection('verify_code')
sms_code_server = redis_conn.get('sms_%s' % mobile).decode()
# 校验
if sms_code_server is None:
return http.JsonResponse({'errmsg': '手机号有误'}, status=404)
if sms_code_server != sms_code:
return http.JsonResponse({'errmsg': '输入的短信验证码有误'}, status=400)
user_id = user.id
access_token = redis_conn.get('token_%s' % user_id)
access_token = access_token.decode()
return http.JsonResponse({'user_id': user_id, 'access_token': access_token})
class InputPasswordView(View):
'''覆盖原密码'''
def post(self, request, user_id):
# 获取用户
user = User.objects.get(id= user_id)
json_dict = json.loads(request.body.decode())
# 获取表单密码
pwd = json_dict.get('password')
cpwd = json_dict.get('password2')
access_token = json_dict.get('access_token')
# 校验
if all([pwd, cpwd, access_token]) is False:
return http.JsonResponse({'code': RETCODE.PARAMERR, 'message': '缺少必传参数'})
if pwd != cpwd:
return http.JsonResponse({'message': '两次输入密码不一致', 'code': RETCODE.CPWDERR})
# 创建reis链接
redis_conn = get_redis_connection('verify_code')
# 获取server中的token
access_token_server = redis_conn.get('token_%s' % user_id)
        # redis returns bytes, so decode before comparing with the submitted str token
        if access_token_server is None or access_token != access_token_server.decode():
return http.JsonResponse({'message': '数据有误'}, status=400)
# 保存新密码
user.set_password(pwd)
user.save()
        return http.JsonResponse({'code': RETCODE.OK, 'message': '修改成功'})
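A minimal standalone sketch of the access-token round trip used by the password-reset views above, assuming itsdangerous 1.x (TimedJSONWebSignatureSerializer was removed in 2.x); the secret and mobile number are placeholders:
# Standalone token round trip mirroring FindPasswordView / GenerateSmsCodeView above.
from itsdangerous import TimedJSONWebSignatureSerializer as TOKEN

SECRET_KEY = 'replace-with-settings.SECRET_KEY'   # placeholder secret
serializer = TOKEN(SECRET_KEY, 300)               # token expires after 300 seconds
access_token = serializer.dumps({'mobile': '13800000000'}).decode()
payload = TOKEN(SECRET_KEY, 300).loads(access_token)
print(payload['mobile'])                          # -> 13800000000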
| 15,067
| 2,467
| 578
|
0eff8762648ff5eb81e2b701d45e3c0febac192a
| 1,654
|
py
|
Python
|
src/archive/clay_bricks/PatternBrickLibrary/patternGenv0.py
|
JonasWard/ClayAdventures
|
a716445ac690e4792e70658319aa1d5299f9c9e9
|
[
"MIT"
] | 1
|
2020-03-25T10:55:10.000Z
|
2020-03-25T10:55:10.000Z
|
src/archive/clay_bricks/PatternBrickLibrary/patternGenv0.py
|
JonasWard/ClayAdventures
|
a716445ac690e4792e70658319aa1d5299f9c9e9
|
[
"MIT"
] | null | null | null |
src/archive/clay_bricks/PatternBrickLibrary/patternGenv0.py
|
JonasWard/ClayAdventures
|
a716445ac690e4792e70658319aa1d5299f9c9e9
|
[
"MIT"
] | null | null | null |
import Rhino.Geometry as rg
import math
# grasshoppper variables
# geometric variables
base_points
normals
layer_count
layer_height
# pattern variables
min_max_val
period
phase_shift
base_layer_set = []
for i, pt in enumerate(base_points):
local_pt = rg.Point3d(pt.X, pt.Y, 0.0)
base_layer_set.append(PointWithNormal(local_pt, normals[i]))
print(base_layer_set)
curve_list = []
| 20.675
| 87
| 0.643894
|
import Rhino.Geometry as rg
import math
# grasshoppper variables
# geometric variables
base_points
normals
layer_count
layer_height
# pattern variables
min_max_val
period
phase_shift
class PointWithNormal(object):
def __init__(self, pt, normal):
self.o = pt
self.n = normal
def new_pt(self, scale_val = 0):
return rg.Point3d(self.o + self.n * scale_val)
base_layer_set = []
for i, pt in enumerate(base_points):
local_pt = rg.Point3d(pt.X, pt.Y, 0.0)
base_layer_set.append(PointWithNormal(local_pt, normals[i]))
print(base_layer_set)
curve_list = []
class PatternMap(object):
def __init__(self, base_crv, origin = rg.Point3d(0,0,0)):
        self.base_crv = base_crv
def subdivide(self, div_length):
length = self.base_crv.GetLength()
divisions = int(length / div_length)
self.div_length = length / divisions
t_vals = self.base_crv.DivideByLength(self.div_length, True)
        planes = [self.base_crv.FrameAt(t) for t in t_vals]
# self.base_lay =
return planes
def generate(self, layer_height, layer_count):
self.lay_h = layer_height
        self.lay_c = layer_count
# for z_in in layer_count:
# z_val = z_in * self.lay_h
# for x_in, pt in enumerate(self.base_lay):
# scale_val = math.sin(local_phase_shift + x_in / period) * min_max_val
# shifted_pt = o_pt.new_pt(scale_val)
# local_crv_set.append(shifted_pt + z_mv_pt)
# local_crv_set.append(local_crv_set[0])
# curve_list.append(rg.Polyline(local_crv_set))
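A hedged, Rhino-free sketch of the sine-modulated offset the commented-out generate() body above appears to aim for; the 2D tuples and all numeric parameters are illustrative stand-ins for the Rhino points, normals and Grasshopper inputs:
# Plain-Python restatement of the per-layer sine offset; all values are placeholders.
import math

def offset_layer(points, normals, layer_index, period=8.0, amplitude=2.0, phase_step=0.5):
    phase = layer_index * phase_step
    layer = []
    for i, ((x, y), (nx, ny)) in enumerate(zip(points, normals)):
        scale = math.sin(phase + i / period) * amplitude   # same form as the commented loop
        layer.append((x + nx * scale, y + ny * scale))
    return layer

base = [(0.0, 0.0), (1.0, 0.0), (2.0, 0.0)]
norms = [(0.0, 1.0)] * 3
print(offset_layer(base, norms, layer_index=3))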
| 1,065
| 13
| 181
|
f13473ccc5629f22523e6eb0bf6ea63fe8b6ee02
| 2,153
|
py
|
Python
|
python/oneflow/test/graph/test_graph_debug.py
|
zzk0/oneflow
|
ab15f5986ee0081da5493ee63d3f2acf063ae229
|
[
"Apache-2.0"
] | null | null | null |
python/oneflow/test/graph/test_graph_debug.py
|
zzk0/oneflow
|
ab15f5986ee0081da5493ee63d3f2acf063ae229
|
[
"Apache-2.0"
] | null | null | null |
python/oneflow/test/graph/test_graph_debug.py
|
zzk0/oneflow
|
ab15f5986ee0081da5493ee63d3f2acf063ae229
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
rank = flow.env.get_rank()
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
@flow.unittest.skip_unless_1n4d()
if __name__ == "__main__":
unittest.main()
| 27.602564
| 75
| 0.699025
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
rank = flow.env.get_rank()
def _graph_debug(test_case, ranks=None):
class DebugGraph(flow.nn.Graph):
def __init__(self):
super().__init__()
self.m = flow.nn.Linear(3, 3)
def build(self, x):
return x
d_g = DebugGraph()
d_g.debug(True, 0, ranks)
if ranks is None:
rank_list = [0]
elif isinstance(ranks, int):
rank_list = [ranks]
elif isinstance(ranks, list):
rank_list = ranks
if -1 in rank_list or rank in rank_list:
test_case.assertTrue(d_g._debug)
test_case.assertTrue(d_g.m._debug)
print(f"ranks {ranks} rank {rank} debug is opened.")
else:
test_case.assertTrue(not d_g._debug)
test_case.assertTrue(not d_g.m._debug)
print(f"ranks {ranks} rank {rank} debug is closed.")
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
@flow.unittest.skip_unless_1n4d()
class TestGraphDebug(oneflow.unittest.TestCase):
def test_graph_debug_rank_null(test_case):
_graph_debug(test_case)
def test_graph_debug_rank_0(test_case):
_graph_debug(test_case, 0)
def test_graph_debug_rank_1(test_case):
_graph_debug(test_case, 1)
def test_graph_debug_rank_1_and_2(test_case):
_graph_debug(test_case, [1, 2])
def test_graph_debug_rank_all(test_case):
_graph_debug(test_case, -1)
if __name__ == "__main__":
unittest.main()
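A pure-Python restatement of the rank-selection rule exercised by the tests above: debug opens on rank 0 when ranks is None, on the given rank for an int, and on any listed rank or everywhere when the list contains -1. It has no OneFlow dependency:
def debug_enabled(current_rank, ranks=None):
    # Mirrors the rank gating checked inside _graph_debug above.
    if ranks is None:
        rank_list = [0]
    elif isinstance(ranks, int):
        rank_list = [ranks]
    else:
        rank_list = list(ranks)
    return -1 in rank_list or current_rank in rank_list

assert debug_enabled(0) is True          # default: only rank 0
assert debug_enabled(2, [1, 2]) is True  # explicit rank list
assert debug_enabled(3, -1) is True      # -1 opens debug on every rank
assert debug_enabled(3, 1) is False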
| 1,062
| 27
| 179
|